Subversion Repositories: HelenOS-historic

Compare Revisions

Rev 411 → Rev 413
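Across the two revisions compared below, the cpu_priority_* primitives and the pri_t type are replaced by the interrupts_* primitives and ipl_t, and the thread field pri is renamed priority. A minimal sketch of the rev 413 critical-section idiom, with a hypothetical spinlock some_lock standing in for the various locks touched in the diff:

	ipl_t ipl;

	ipl = interrupts_disable();	/* remembers the previous interrupt state */
	spinlock_lock(&some_lock);
	/* ... critical section ... */
	spinlock_unlock(&some_lock);
	interrupts_restore(ipl);	/* puts back exactly what interrupts_disable() returned */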

/SPARTAN/trunk/src/proc/scheduler.c
125,13 → 125,13
ASSERT(CPU != NULL);
 
loop:
cpu_priority_high();
interrupts_disable();
 
spinlock_lock(&CPU->lock);
n = CPU->nrdy;
spinlock_unlock(&CPU->lock);
 
cpu_priority_low();
interrupts_enable();
if (n == 0) {
#ifdef __SMP__
155,7 → 155,7
goto loop;
}
 
cpu_priority_high();
interrupts_disable();
i = 0;
retry:
196,7 → 196,7
t->cpu = CPU;
 
t->ticks = us2ticks((i+1)*10000);
t->pri = i; /* eventually correct rq index */
t->priority = i; /* eventually correct rq index */
 
/*
* Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
261,11 → 261,11
*/
void scheduler(void)
{
volatile pri_t pri;
volatile ipl_t ipl;
 
ASSERT(CPU != NULL);
 
pri = cpu_priority_high();
ipl = interrupts_disable();
 
if (haltstate)
halt();
281,16 → 281,16
*/
before_thread_runs();
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(THREAD->saved_context.pri);
interrupts_restore(THREAD->saved_context.ipl);
return;
}
 
/*
* CPU priority of preempted thread is recorded here
* to facilitate scheduler() invocations from
* cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()).
* Interrupt priority level of preempted thread is recorded here
* to facilitate scheduler() invocations from interrupts_disable()'d
* code (e.g. waitq_sleep_timeout()).
*/
THREAD->saved_context.pri = pri;
THREAD->saved_context.ipl = ipl;
}
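A sketch of the caller-side pattern the comment above refers to, modelled on waitq_sleep_timeout() further down in this diff (the wait-queue bookkeeping is elided):

	volatile ipl_t ipl;	/* must stay live across the context switch in scheduler() */

	ipl = interrupts_disable();
	/* ... enqueue THREAD on a wait queue, set its state ... */
	scheduler();		/* the disabled state is kept in THREAD->saved_context.ipl,
				 * so interrupts are still off when the thread runs again here */
	interrupts_restore(ipl);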
 
/*
371,7 → 371,7
/*
* Prefer the thread after it's woken up.
*/
THREAD->pri = -1;
THREAD->priority = -1;
 
/*
* We need to release wq->lock which we locked in waitq_sleep().
406,7 → 406,7
THREAD = find_best_thread();
spinlock_lock(&THREAD->lock);
priority = THREAD->pri;
priority = THREAD->priority;
spinlock_unlock(&THREAD->lock);
 
relink_rq(priority);
446,7 → 446,7
THREAD->state = Running;
 
#ifdef SCHEDULER_VERBOSE
printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy);
printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy);
#endif
 
/*
472,7 → 472,7
{
thread_t *t;
int count, i, j, k = 0;
pri_t pri;
ipl_t ipl;
 
loop:
/*
486,12 → 486,12
* other CPU's. Note that situation can have changed between two
* passes. Each time get the most up to date counts.
*/
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&CPU->lock);
count = nrdy / config.cpu_active;
count -= CPU->nrdy;
spinlock_unlock(&CPU->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
 
if (count <= 0)
goto satisfied;
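A worked illustration of the count computed above, using hypothetical numbers:

	/* With nrdy = 8 ready threads system-wide and config.cpu_active = 4,
	 * the per-CPU average is 2. A CPU that currently has CPU->nrdy = 0
	 * computes count = 2 and tries to steal two threads; a CPU that already
	 * has CPU->nrdy >= 2 computes count <= 0 and takes the satisfied: exit. */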
514,12 → 514,12
if (CPU == cpu)
continue;
 
restart: pri = cpu_priority_high();
restart: ipl = interrupts_disable();
r = &cpu->rq[j];
spinlock_lock(&r->lock);
if (r->n == 0) {
spinlock_unlock(&r->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
continue;
}
548,7 → 548,7
if (!spinlock_trylock(&cpu->lock)) {
/* Release all locks and try again. */
spinlock_unlock(&r->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
goto restart;
}
cpu->nrdy--;
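The hunk above dodges a lock-ordering deadlock with a trylock-and-back-off idiom; a generic sketch of the same pattern, assuming two hypothetical spinlocks a and b that other code acquires in the order a, then b:

	restart:
		ipl = interrupts_disable();
		spinlock_lock(&b);
		if (!spinlock_trylock(&a)) {
			/* a holder of a may be spinning on b; drop everything and retry */
			spinlock_unlock(&b);
			interrupts_restore(ipl);
			goto restart;
		}
		/* ... work with both locks held ... */
		spinlock_unlock(&a);
		spinlock_unlock(&b);
		interrupts_restore(ipl);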
580,7 → 580,7
thread_ready(t);
 
cpu_priority_restore(pri);
interrupts_restore(ipl);
if (--count == 0)
goto satisfied;
592,7 → 592,7
continue;
}
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
}
 
/SPARTAN/trunk/src/proc/task.c
64,7 → 64,7
*/
task_t *task_create(vm_t *m)
{
pri_t pri;
ipl_t ipl;
task_t *ta;
ta = (task_t *) malloc(sizeof(task_t));
74,11 → 74,11
list_initialize(&ta->tasks_link);
ta->vm = m;
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
list_append(&ta->tasks_link, &tasks_head);
spinlock_unlock(&tasks_lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
return ta;
}
/SPARTAN/trunk/src/proc/thread.c
67,7 → 67,7
* makes a call to thread_exit() when its implementing
* function returns.
*
* cpu_priority_high() is assumed.
* interrupts_disable() is assumed.
*
*/
void cushion(void)
79,7 → 79,7
before_thread_runs();
 
spinlock_unlock(&THREAD->lock);
cpu_priority_low();
interrupts_enable();
 
f(arg);
thread_exit();
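For illustration, a hypothetical thread body run under cushion(), assuming the void (*)(void *) signature implied by f(arg) above; it is entered with interrupts already enabled and need not call thread_exit() itself:

	void demo_thread(void *arg)
	{
		/* ... do the thread's work; arg is the thread_arg passed to thread_create() ... */

		/* simply returning drops control back into cushion(), which then
		 * calls thread_exit() on the thread's behalf */
	}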
112,14 → 112,14
{
cpu_t *cpu;
runq_t *r;
pri_t pri;
ipl_t ipl;
int i, avg, send_ipi = 0;
 
pri = cpu_priority_high();
ipl = interrupts_disable();
 
spinlock_lock(&t->lock);
 
i = (t->pri < RQ_COUNT -1) ? ++t->pri : t->pri;
i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority;
cpu = CPU;
if (t->flags & X_WIRED) {
148,7 → 148,7
}
spinlock_unlock(&cpu->lock);
 
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
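A worked illustration of the priority aging above, tied to the t->priority = -1 initialisation in thread_create() below:

	/* A fresh thread is created with t->priority = -1, so its first
	 * thread_ready() executes ++t->priority and files it in rq[0]. Each
	 * later wakeup pushes it one run queue lower until it sticks at
	 * rq[RQ_COUNT - 1]. Conversely, setting THREAD->priority = -1 before a
	 * thread sleeps (as the scheduler.c hunk above does for woken threads)
	 * puts it back in rq[0] on its next wakeup. */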
 
 
171,7 → 171,7
 
t = (thread_t *) malloc(sizeof(thread_t));
if (t) {
pri_t pri;
ipl_t ipl;
spinlock_initialize(&t->lock);
180,11 → 180,11
frame_us = frame_alloc(FRAME_KA);
}
 
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&tidlock);
t->tid = ++last_tid;
spinlock_unlock(&tidlock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
memsetb(frame_ks, THREAD_STACK_SIZE, 0);
link_initialize(&t->rq_link);
199,14 → 199,14
the_initialize((the_t *) t->kstack);
 
pri = cpu_priority_high();
t->saved_context.pri = cpu_priority_read();
cpu_priority_restore(pri);
ipl = interrupts_disable();
t->saved_context.ipl = interrupts_read();
interrupts_restore(ipl);
t->thread_code = func;
t->thread_arg = arg;
t->ticks = -1;
t->pri = -1; /* start in rq[0] */
t->priority = -1; /* start in rq[0] */
t->cpu = NULL;
t->flags = 0;
t->state = Entering;
227,7 → 227,7
/*
* Register this thread in the system-wide list.
*/
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&threads_lock);
list_append(&t->threads_link, &threads_head);
spinlock_unlock(&threads_lock);
239,7 → 239,7
list_append(&t->th_link, &task->th_head);
spinlock_unlock(&task->lock);
 
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
return t;
254,14 → 254,14
*/
void thread_exit(void)
{
pri_t pri;
ipl_t ipl;
 
restart:
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&THREAD->lock);
if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
goto restart;
}
THREAD->state = Exiting;
311,12 → 311,12
*/
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
pri_t pri;
ipl_t ipl;
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&THREAD->lock);
THREAD->call_me = call_me;
THREAD->call_me_with = call_me_with;
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
/SPARTAN/trunk/src/main/kinit.c
64,7 → 64,7
thread_t *t;
int i;
 
cpu_priority_high();
interrupts_disable();
 
#ifdef __SMP__
if (config.cpu_count > 1) {
115,7 → 115,7
}
#endif /* __SMP__ */
 
cpu_priority_low();
interrupts_enable();
 
#ifdef __USERSPACE__
/*
/SPARTAN/trunk/src/main/main.c
109,7 → 109,7
*
* Initializes the kernel by bootstrap CPU.
*
* Assuming cpu_priority_high().
* Assuming interrupts_disable().
*
*/
void main_bsp(void)
208,7 → 208,7
* Executed by application processors, temporary stack
* is at ctx.sp which was set during BP boot.
*
* Assuming cpu_priority_high().
* Assuming interrupts_disable()'d.
*
*/
void main_ap(void)
/SPARTAN/trunk/src/synch/rwlock.c
96,14 → 96,14
*/
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
pri_t pri;
ipl_t ipl;
int rc;
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&THREAD->lock);
THREAD->rwlock_holder_type = RWLOCK_WRITER;
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
 
/*
* Writers take the easy part.
118,7 → 118,7
* No claims about its holder can be made.
*/
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&rwl->lock);
/*
* Now when rwl is locked, we can inspect it again.
128,7 → 128,7
if (rwl->readers_in)
let_others_in(rwl, ALLOW_READERS_ONLY);
spinlock_unlock(&rwl->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
return rc;
151,9 → 151,9
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock)
{
int rc;
pri_t pri;
ipl_t ipl;
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&THREAD->lock);
THREAD->rwlock_holder_type = RWLOCK_READER;
spinlock_unlock(&THREAD->lock);
204,7 → 204,7
case ESYNCH_TIMEOUT:
/*
* The sleep timeouted.
* We just restore the cpu priority.
* We just restore interrupt priority level.
*/
case ESYNCH_OK_BLOCKED:
/*
215,7 → 215,7
* Same time means both events happen atomically when
* rwl->lock is held.)
*/
cpu_priority_restore(pri);
interrupts_restore(ipl);
break;
case ESYNCH_OK_ATOMIC:
panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC");
236,7 → 236,7
rwl->readers_in++;
spinlock_unlock(&rwl->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
 
return ESYNCH_OK_ATOMIC;
}
251,13 → 251,13
*/
void rwlock_write_unlock(rwlock_t *rwl)
{
pri_t pri;
ipl_t ipl;
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&rwl->lock);
let_others_in(rwl, ALLOW_ALL);
spinlock_unlock(&rwl->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
272,14 → 272,14
*/
void rwlock_read_unlock(rwlock_t *rwl)
{
pri_t pri;
ipl_t ipl;
 
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&rwl->lock);
if (!--rwl->readers_in)
let_others_in(rwl, ALLOW_ALL);
spinlock_unlock(&rwl->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
 
289,7 → 289,7
* to waiting readers or a writer.
*
* Must be called with rwl->lock locked.
* Must be called with cpu_priority_high'ed.
* Must be called with interrupts_disable()'d.
*
* @param rwl Reader/Writer lock.
* @param readers_only See the description below.
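A sketch of a caller honouring the two preconditions stated above, mirroring rwlock_write_unlock() earlier in this file (rwl as in the functions above):

	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&rwl->lock);
	let_others_in(rwl, ALLOW_ALL);		/* or ALLOW_READERS_ONLY */
	spinlock_unlock(&rwl->lock);
	interrupts_restore(ipl);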
/SPARTAN/trunk/src/synch/semaphore.c
42,17 → 42,17
*/
void semaphore_initialize(semaphore_t *s, int val)
{
pri_t pri;
ipl_t ipl;
waitq_initialize(&s->wq);
pri = cpu_priority_high();
ipl = interrupts_disable();
 
spinlock_lock(&s->wq.lock);
s->wq.missed_wakeups = val;
spinlock_unlock(&s->wq.lock);
 
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
/** Semaphore down
/SPARTAN/trunk/src/synch/waitq.c
137,11 → 137,11
*/
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking)
{
volatile pri_t pri; /* must be live after context_restore() */
volatile ipl_t ipl; /* must be live after context_restore() */
restart:
pri = cpu_priority_high();
ipl = interrupts_disable();
/*
* Busy waiting for a delayed timeout.
153,7 → 153,7
spinlock_lock(&THREAD->lock);
if (THREAD->timeout_pending) {
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
goto restart;
}
spinlock_unlock(&THREAD->lock);
164,7 → 164,7
if (wq->missed_wakeups) {
wq->missed_wakeups--;
spinlock_unlock(&wq->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
return ESYNCH_OK_ATOMIC;
}
else {
171,7 → 171,7
if (nonblocking && (usec == 0)) {
/* return immediatelly instead of going to sleep */
spinlock_unlock(&wq->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
return ESYNCH_WOULD_BLOCK;
}
}
189,7 → 189,7
*/
before_thread_runs();
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
return ESYNCH_TIMEOUT;
}
THREAD->timeout_pending = 1;
207,7 → 207,7
spinlock_unlock(&THREAD->lock);
 
scheduler(); /* wq->lock is released in scheduler_separated_stack() */
cpu_priority_restore(pri);
interrupts_restore(ipl);
return ESYNCH_OK_BLOCKED;
}
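For illustration, a hypothetical caller dispatching on the return codes used in this function; wq, usec and nonblocking are placeholders:

	switch (waitq_sleep_timeout(&wq, usec, nonblocking)) {
	case ESYNCH_OK_ATOMIC:		/* a missed wakeup was consumed without sleeping */
	case ESYNCH_OK_BLOCKED:		/* the thread slept and was woken up */
		/* success */
		break;
	case ESYNCH_WOULD_BLOCK:	/* nonblocking was set and nothing was pending */
	case ESYNCH_TIMEOUT:		/* usec expired before a wakeup arrived */
		/* failure */
		break;
	}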
228,15 → 228,15
*/
void waitq_wakeup(waitq_t *wq, int all)
{
pri_t pri;
ipl_t ipl;
 
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&wq->lock);
 
_waitq_wakeup_unsafe(wq, all);
 
spinlock_unlock(&wq->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
/SPARTAN/trunk/src/debug/print.c
285,7 → 285,7
va_start(ap, fmt);
 
irqpri = cpu_priority_high();
irqpri = interrupts_disable();
spinlock_lock(&printflock);
 
while (c = fmt[i++]) {
400,7 → 400,7
 
out:
spinlock_unlock(&printflock);
cpu_priority_restore(irqpri);
interrupts_restore(irqpri);
va_end(ap);
}
/SPARTAN/trunk/src/lib/func.c
43,7 → 43,7
void halt(void)
{
haltstate = 1;
cpu_priority_high();
interrupts_disable();
if (CPU)
printf("cpu%d: halted\n", CPU->id);
else
/SPARTAN/trunk/src/mm/vm.c
89,13 → 89,13
 
vm_area_t *vm_area_create(vm_t *m, vm_type_t type, size_t size, __address addr)
{
pri_t pri;
ipl_t ipl;
vm_area_t *a;
if (addr % PAGE_SIZE)
panic("addr not aligned to a page boundary");
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&m->lock);
/*
110,7 → 110,7
if (!a->mapping) {
free(a);
spinlock_unlock(&m->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
return NULL;
}
129,7 → 129,7
}
 
spinlock_unlock(&m->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
return a;
}
141,9 → 141,9
void vm_area_map(vm_area_t *a, vm_t *m)
{
int i, flags;
pri_t pri;
ipl_t ipl;
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&m->lock);
spinlock_lock(&a->lock);
 
165,15 → 165,15
spinlock_unlock(&a->lock);
spinlock_unlock(&m->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
void vm_area_unmap(vm_area_t *a, vm_t *m)
{
int i;
pri_t pri;
ipl_t ipl;
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&m->lock);
spinlock_lock(&a->lock);
 
183,15 → 183,15
spinlock_unlock(&a->lock);
spinlock_unlock(&m->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
void vm_install(vm_t *m)
{
link_t *l;
pri_t pri;
ipl_t ipl;
pri = cpu_priority_high();
ipl = interrupts_disable();
 
tlb_shootdown_start();
spinlock_lock(&m->lock);
202,7 → 202,7
spinlock_unlock(&m->lock);
tlb_shootdown_finalize();
 
cpu_priority_restore(pri);
interrupts_restore(ipl);
 
vm_install_arch(m);
/SPARTAN/trunk/src/mm/frame.c
68,7 → 68,7
*/
__address frame_alloc(int flags)
{
pri_t pri;
ipl_t ipl;
link_t *cur, *tmp;
zone_t *z;
zone_t *zone = NULL;
76,7 → 76,7
__address v;
loop:
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&zone_head_lock);
/*
104,7 → 104,7
* TODO: Sleep until frames are available again.
*/
spinlock_unlock(&zone_head_lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
 
panic("Sleep not implemented.\n");
goto loop;
126,7 → 126,7
spinlock_unlock(&zone->lock);
spinlock_unlock(&zone_head_lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
return v;
}
141,7 → 141,7
*/
void frame_free(__address addr)
{
pri_t pri;
ipl_t ipl;
link_t *cur;
zone_t *z;
zone_t *zone = NULL;
149,7 → 149,7
ASSERT(addr % FRAME_SIZE == 0);
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&zone_head_lock);
/*
187,7 → 187,7
spinlock_unlock(&zone->lock);
spinlock_unlock(&zone_head_lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
/** Mark frame not free.
199,7 → 199,7
*/
void frame_not_free(__address addr)
{
pri_t pri;
ipl_t ipl;
link_t *cur;
zone_t *z;
zone_t *zone = NULL;
207,7 → 207,7
ASSERT(addr % FRAME_SIZE == 0);
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&zone_head_lock);
/*
246,7 → 246,7
spinlock_unlock(&zone->lock);
spinlock_unlock(&zone_head_lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
/** Mark frame region not free.
335,15 → 335,15
*/
void zone_attach(zone_t *zone)
{
pri_t pri;
ipl_t ipl;
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&zone_head_lock);
list_append(&zone->link, &zone_head);
spinlock_unlock(&zone_head_lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
/** Initialize frame structure
/SPARTAN/trunk/src/mm/heap.c
60,7 → 60,7
*/
void *early_malloc(size_t size)
{
pri_t pri;
ipl_t ipl;
chunk_t *x, *y, *z;
 
if (size == 0)
67,7 → 67,7
panic("zero-size allocation request");
x = chunk0;
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&heaplock);
while (x) {
if (x->used || x->size < size) {
84,7 → 84,7
*/
if (x->size < size + sizeof(chunk_t) + 1) {
spinlock_unlock(&heaplock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
return &x->data[0];
}
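A worked example of the no-split condition above, under the hypothetical assumption that sizeof(chunk_t) is 16:

	/* For a request of size = 96, a free chunk with x->size = 100 is handed
	 * out whole, because splitting it would have to leave room for a new
	 * chunk header plus at least one data byte (96 + 16 + 1 = 113 > 100).
	 * A chunk with x->size = 120 would be split instead. */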
 
105,18 → 105,18
x->size = size;
x->next = y;
spinlock_unlock(&heaplock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
 
return &x->data[0];
}
spinlock_unlock(&heaplock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
return NULL;
}
 
void early_free(void *ptr)
{
pri_t pri;
ipl_t ipl;
chunk_t *x, *y, *z;
 
if (!ptr)
127,7 → 127,7
if (y->used != 1)
panic("freeing unused/damaged chunk");
 
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&heaplock);
x = y->prev;
z = y->next;
150,5 → 150,5
}
y->used = 0;
spinlock_unlock(&heaplock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
/SPARTAN/trunk/src/time/delay.c
42,12 → 42,15
*/
void delay(__u32 usec)
{
pri_t pri;
ipl_t ipl;
/* The delay loop is calibrated for each and every
CPU in the system. Therefore it is necessary to
cpu_priority_high() before calling the asm_delay_loop(). */
pri = cpu_priority_high();
/*
* The delay loop is calibrated for each and every
* CPU in the system. Therefore it is necessary to
* call interrupts_disable() before calling the
* asm_delay_loop().
*/
ipl = interrupts_disable();
asm_delay_loop(usec * CPU->delay_loop_const);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
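A brief note on why the IPL is held across the whole loop (the reasoning is implied by the comment above rather than stated in the code):

	/* delay(100), for example, spins for roughly 100 microseconds using
	 * CPU->delay_loop_const, a constant calibrated for this particular CPU.
	 * With interrupts disabled the thread cannot be preempted and migrated
	 * to a CPU whose constant differs, so the calibration stays valid for
	 * the whole loop. */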
/SPARTAN/trunk/src/time/timeout.c
100,10 → 100,10
{
timeout_t *hlp;
link_t *l, *m;
pri_t pri;
ipl_t ipl;
__u64 sum;
 
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&CPU->timeoutlock);
spinlock_lock(&t->lock);
 
152,7 → 152,7
 
spinlock_unlock(&t->lock);
spinlock_unlock(&CPU->timeoutlock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
}
 
 
168,19 → 168,19
{
timeout_t *hlp;
link_t *l;
pri_t pri;
ipl_t ipl;
 
grab_locks:
pri = cpu_priority_high();
ipl = interrupts_disable();
spinlock_lock(&t->lock);
if (!t->cpu) {
spinlock_unlock(&t->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
return false;
}
if (!spinlock_trylock(&t->cpu->timeoutlock)) {
spinlock_unlock(&t->lock);
cpu_priority_restore(pri);
interrupts_restore(ipl);
goto grab_locks;
}
203,6 → 203,6
timeout_reinitialize(t);
spinlock_unlock(&t->lock);
 
cpu_priority_restore(pri);
interrupts_restore(ipl);
return true;
}
/SPARTAN/trunk/src/time/clock.c
44,7 → 44,7
/** Clock routine
*
* Clock routine executed from clock interrupt handler
* (assuming cpu_priority_high()). Runs expired timeouts
* (assuming interrupts_disable()'d). Runs expired timeouts
* and preemptive scheduling.
*
*/
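A short note tying this to the hunks above (inferred from the surrounding code rather than shown verbatim in this diff):

	/* On every tick, clock() runs with interrupts disabled in interrupt
	 * context, fires any timeouts whose deadline has passed (the per-CPU
	 * lists maintained in timeout.c above) and charges the tick to the
	 * running thread's ticks budget, set via us2ticks() in scheduler.c;
	 * when that budget is exhausted it preempts the thread by calling the
	 * scheduler. */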