/SPARTAN/trunk/test/synch/rwlock4/test.c |
---|
60,7 → 60,7 |
__u32 random(__u32 max) |
{ |
__u32 rc; |
pri_t pri; |
ipl_t ipl; |
spinlock_lock(&lock); |
rc = seed % max; |
/SPARTAN/trunk/include/arch.h |
---|
46,17 → 46,17 |
#define early_mapping(stack, size) |
#endif /* early_mapping */ |
/* |
/** |
* For each possible kernel stack, structure |
* of the following type will be placed at |
* the bottom of the stack. |
*/ |
struct the { |
int preemption_disabled; |
thread_t *thread; /* current thread */ |
task_t *task; /* current task */ |
cpu_t *cpu; /* executing cpu */ |
vm_t *vm; /* current vm */ |
int preemption_disabled; /**< Preemption disabled counter. */ |
thread_t *thread; /**< Current thread. */ |
task_t *task; /**< Current task. */ |
cpu_t *cpu; /**< Executing cpu. */ |
vm_t *vm; /**< Current vm. */ |
}; |
#define THE ((the_t *)(get_stack_base())) |
69,9 → 69,9 |
extern void arch_late_init(void); |
extern void calibrate_delay_loop(void); |
extern pri_t cpu_priority_high(void); |
extern pri_t cpu_priority_low(void); |
extern void cpu_priority_restore(pri_t pri); |
extern pri_t cpu_priority_read(void); |
extern ipl_t interrupts_disable(void); |
extern ipl_t interrupts_enable(void); |
extern void interrupts_restore(ipl_t ipl); |
extern ipl_t interrupts_read(void); |
#endif |
/SPARTAN/trunk/include/proc/thread.h |
---|
100,7 → 100,7 |
__u64 ticks; /**< Ticks before preemption. */ |
int pri; /**< Thread's priority. Implemented as index of run queue. */ |
int priority; /**< Thread's priority. Implemented as index to CPU->rq */ |
__u32 tid; /**< Thread ID. */ |
ARCH_THREAD_DATA; /**< Architecture-specific data. */ |
/SPARTAN/trunk/src/proc/scheduler.c |
---|
125,13 → 125,13 |
ASSERT(CPU != NULL); |
loop: |
cpu_priority_high(); |
interrupts_disable(); |
spinlock_lock(&CPU->lock); |
n = CPU->nrdy; |
spinlock_unlock(&CPU->lock); |
cpu_priority_low(); |
interrupts_enable(); |
if (n == 0) { |
#ifdef __SMP__ |
155,7 → 155,7 |
goto loop; |
} |
cpu_priority_high(); |
interrupts_disable(); |
i = 0; |
retry: |
196,7 → 196,7 |
t->cpu = CPU; |
t->ticks = us2ticks((i+1)*10000); |
t->pri = i; /* eventually correct rq index */ |
t->priority = i; /* eventually correct rq index */ |
/* |
* Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge. |
261,11 → 261,11 |
*/ |
void scheduler(void) |
{ |
volatile pri_t pri; |
volatile ipl_t ipl; |
ASSERT(CPU != NULL); |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
if (haltstate) |
halt(); |
281,16 → 281,16 |
*/ |
before_thread_runs(); |
spinlock_unlock(&THREAD->lock); |
cpu_priority_restore(THREAD->saved_context.pri); |
interrupts_restore(THREAD->saved_context.ipl); |
return; |
} |
/* |
* CPU priority of preempted thread is recorded here |
* to facilitate scheduler() invocations from |
* cpu_priority_high()'ed code (e.g. waitq_sleep_timeout()). |
* Interrupt priority level of preempted thread is recorded here |
* to facilitate scheduler() invocations from interrupts_disable()'d |
* code (e.g. waitq_sleep_timeout()). |
*/ |
THREAD->saved_context.pri = pri; |
THREAD->saved_context.ipl = ipl; |
} |
/* |
371,7 → 371,7 |
/* |
* Prefer the thread after it's woken up. |
*/ |
THREAD->pri = -1; |
THREAD->priority = -1; |
/* |
* We need to release wq->lock which we locked in waitq_sleep(). |
406,7 → 406,7 |
THREAD = find_best_thread(); |
spinlock_lock(&THREAD->lock); |
priority = THREAD->pri; |
priority = THREAD->priority; |
spinlock_unlock(&THREAD->lock); |
relink_rq(priority); |
446,7 → 446,7 |
THREAD->state = Running; |
#ifdef SCHEDULER_VERBOSE |
printf("cpu%d: tid %d (pri=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->pri, THREAD->ticks, CPU->nrdy); |
printf("cpu%d: tid %d (priority=%d,ticks=%d,nrdy=%d)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, CPU->nrdy); |
#endif |
/* |
472,7 → 472,7 |
{ |
thread_t *t; |
int count, i, j, k = 0; |
pri_t pri; |
ipl_t ipl; |
loop: |
/* |
486,12 → 486,12 |
* other CPUs. Note that the situation may have changed between two |
* passes. Each time get the most up to date counts. |
*/ |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&CPU->lock); |
count = nrdy / config.cpu_active; |
count -= CPU->nrdy; |
spinlock_unlock(&CPU->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
if (count <= 0) |
goto satisfied; |
514,12 → 514,12 |
if (CPU == cpu) |
continue; |
restart: pri = cpu_priority_high(); |
restart: ipl = interrupts_disable(); |
r = &cpu->rq[j]; |
spinlock_lock(&r->lock); |
if (r->n == 0) { |
spinlock_unlock(&r->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
continue; |
} |
548,7 → 548,7 |
if (!spinlock_trylock(&cpu->lock)) { |
/* Release all locks and try again. */ |
spinlock_unlock(&r->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
goto restart; |
} |
cpu->nrdy--; |
580,7 → 580,7 |
thread_ready(t); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
if (--count == 0) |
goto satisfied; |
592,7 → 592,7 |
continue; |
} |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
} |
/SPARTAN/trunk/src/proc/task.c |
---|
64,7 → 64,7 |
*/ |
task_t *task_create(vm_t *m) |
{ |
pri_t pri; |
ipl_t ipl; |
task_t *ta; |
ta = (task_t *) malloc(sizeof(task_t)); |
74,11 → 74,11 |
list_initialize(&ta->tasks_link); |
ta->vm = m; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&tasks_lock); |
list_append(&ta->tasks_link, &tasks_head); |
spinlock_unlock(&tasks_lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
return ta; |
} |
/SPARTAN/trunk/src/proc/thread.c |
---|
67,7 → 67,7 |
* makes a call to thread_exit() when its implementing |
* function returns. |
* |
* cpu_priority_high() is assumed. |
* interrupts_disable() is assumed. |
* |
*/ |
void cushion(void) |
79,7 → 79,7 |
before_thread_runs(); |
spinlock_unlock(&THREAD->lock); |
cpu_priority_low(); |
interrupts_enable(); |
f(arg); |
thread_exit(); |
112,14 → 112,14 |
{ |
cpu_t *cpu; |
runq_t *r; |
pri_t pri; |
ipl_t ipl; |
int i, avg, send_ipi = 0; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
i = (t->pri < RQ_COUNT -1) ? ++t->pri : t->pri; |
i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority; |
cpu = CPU; |
if (t->flags & X_WIRED) { |
148,7 → 148,7 |
} |
spinlock_unlock(&cpu->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
171,7 → 171,7 |
t = (thread_t *) malloc(sizeof(thread_t)); |
if (t) { |
pri_t pri; |
ipl_t ipl; |
spinlock_initialize(&t->lock); |
180,11 → 180,11 |
frame_us = frame_alloc(FRAME_KA); |
} |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&tidlock); |
t->tid = ++last_tid; |
spinlock_unlock(&tidlock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
memsetb(frame_ks, THREAD_STACK_SIZE, 0); |
link_initialize(&t->rq_link); |
199,14 → 199,14 |
the_initialize((the_t *) t->kstack); |
pri = cpu_priority_high(); |
t->saved_context.pri = cpu_priority_read(); |
cpu_priority_restore(pri); |
ipl = interrupts_disable(); |
t->saved_context.ipl = interrupts_read(); |
interrupts_restore(ipl); |
t->thread_code = func; |
t->thread_arg = arg; |
t->ticks = -1; |
t->pri = -1; /* start in rq[0] */ |
t->priority = -1; /* start in rq[0] */ |
t->cpu = NULL; |
t->flags = 0; |
t->state = Entering; |
227,7 → 227,7 |
/* |
* Register this thread in the system-wide list. |
*/ |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&threads_lock); |
list_append(&t->threads_link, &threads_head); |
spinlock_unlock(&threads_lock); |
239,7 → 239,7 |
list_append(&t->th_link, &task->th_head); |
spinlock_unlock(&task->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
return t; |
254,14 → 254,14 |
*/ |
void thread_exit(void) |
{ |
pri_t pri; |
ipl_t ipl; |
restart: |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
if (THREAD->timeout_pending) { /* busy waiting for timeouts in progress */ |
spinlock_unlock(&THREAD->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
goto restart; |
} |
THREAD->state = Exiting; |
311,12 → 311,12 |
*/ |
void thread_register_call_me(void (* call_me)(void *), void *call_me_with) |
{ |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
THREAD->call_me = call_me; |
THREAD->call_me_with = call_me_with; |
spinlock_unlock(&THREAD->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/SPARTAN/trunk/src/main/kinit.c |
---|
64,7 → 64,7 |
thread_t *t; |
int i; |
cpu_priority_high(); |
interrupts_disable(); |
#ifdef __SMP__ |
if (config.cpu_count > 1) { |
115,7 → 115,7 |
} |
#endif /* __SMP__ */ |
cpu_priority_low(); |
interrupts_enable(); |
#ifdef __USERSPACE__ |
/* |
/SPARTAN/trunk/src/main/main.c |
---|
109,7 → 109,7 |
* |
* Initializes the kernel by bootstrap CPU. |
* |
* Assuming cpu_priority_high(). |
* Assuming interrupts_disable(). |
* |
*/ |
void main_bsp(void) |
208,7 → 208,7 |
* Executed by application processors, temporary stack |
* is at ctx.sp which was set during BP boot. |
* |
* Assuming cpu_priority_high(). |
* Assuming interrupts_disable()'d. |
* |
*/ |
void main_ap(void) |
/SPARTAN/trunk/src/synch/rwlock.c |
---|
96,14 → 96,14 |
*/ |
int _rwlock_write_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock) |
{ |
pri_t pri; |
ipl_t ipl; |
int rc; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
THREAD->rwlock_holder_type = RWLOCK_WRITER; |
spinlock_unlock(&THREAD->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
/* |
* Writers take the easy part. |
118,7 → 118,7 |
* No claims about its holder can be made. |
*/ |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&rwl->lock); |
/* |
* Now when rwl is locked, we can inspect it again. |
128,7 → 128,7 |
if (rwl->readers_in) |
let_others_in(rwl, ALLOW_READERS_ONLY); |
spinlock_unlock(&rwl->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
return rc; |
151,9 → 151,9 |
int _rwlock_read_lock_timeout(rwlock_t *rwl, __u32 usec, int trylock) |
{ |
int rc; |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&THREAD->lock); |
THREAD->rwlock_holder_type = RWLOCK_READER; |
spinlock_unlock(&THREAD->lock); |
204,7 → 204,7 |
case ESYNCH_TIMEOUT: |
/* |
* The sleep timed out. |
* We just restore the cpu priority. |
* We just restore interrupt priority level. |
*/ |
case ESYNCH_OK_BLOCKED: |
/* |
215,7 → 215,7 |
* Same time means both events happen atomically when |
* rwl->lock is held.) |
*/ |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
break; |
case ESYNCH_OK_ATOMIC: |
panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC"); |
236,7 → 236,7 |
rwl->readers_in++; |
spinlock_unlock(&rwl->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return ESYNCH_OK_ATOMIC; |
} |
251,13 → 251,13 |
*/ |
void rwlock_write_unlock(rwlock_t *rwl) |
{ |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&rwl->lock); |
let_others_in(rwl, ALLOW_ALL); |
spinlock_unlock(&rwl->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
272,14 → 272,14 |
*/ |
void rwlock_read_unlock(rwlock_t *rwl) |
{ |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&rwl->lock); |
if (!--rwl->readers_in) |
let_others_in(rwl, ALLOW_ALL); |
spinlock_unlock(&rwl->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
289,7 → 289,7 |
* to waiting readers or a writer. |
* |
* Must be called with rwl->lock locked. |
* Must be called with cpu_priority_high'ed. |
* Must be called with interrupts_disable()'d. |
* |
* @param rwl Reader/Writer lock. |
* @param readers_only See the description below. |
/SPARTAN/trunk/src/synch/semaphore.c |
---|
42,17 → 42,17 |
*/ |
void semaphore_initialize(semaphore_t *s, int val) |
{ |
pri_t pri; |
ipl_t ipl; |
waitq_initialize(&s->wq); |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&s->wq.lock); |
s->wq.missed_wakeups = val; |
spinlock_unlock(&s->wq.lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/** Semaphore down |
/SPARTAN/trunk/src/synch/waitq.c |
---|
137,11 → 137,11 |
*/ |
int waitq_sleep_timeout(waitq_t *wq, __u32 usec, int nonblocking) |
{ |
volatile pri_t pri; /* must be live after context_restore() */ |
volatile ipl_t ipl; /* must be live after context_restore() */ |
restart: |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
/* |
* Busy waiting for a delayed timeout. |
153,7 → 153,7 |
spinlock_lock(&THREAD->lock); |
if (THREAD->timeout_pending) { |
spinlock_unlock(&THREAD->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
goto restart; |
} |
spinlock_unlock(&THREAD->lock); |
164,7 → 164,7 |
if (wq->missed_wakeups) { |
wq->missed_wakeups--; |
spinlock_unlock(&wq->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return ESYNCH_OK_ATOMIC; |
} |
else { |
171,7 → 171,7 |
if (nonblocking && (usec == 0)) { |
/* return immediately instead of going to sleep */ |
spinlock_unlock(&wq->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return ESYNCH_WOULD_BLOCK; |
} |
} |
189,7 → 189,7 |
*/ |
before_thread_runs(); |
spinlock_unlock(&THREAD->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return ESYNCH_TIMEOUT; |
} |
THREAD->timeout_pending = 1; |
207,7 → 207,7 |
spinlock_unlock(&THREAD->lock); |
scheduler(); /* wq->lock is released in scheduler_separated_stack() */ |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return ESYNCH_OK_BLOCKED; |
} |
228,15 → 228,15 |
*/ |
void waitq_wakeup(waitq_t *wq, int all) |
{ |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&wq->lock); |
_waitq_wakeup_unsafe(wq, all); |
spinlock_unlock(&wq->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup() |
/SPARTAN/trunk/src/debug/print.c |
---|
285,7 → 285,7 |
va_start(ap, fmt); |
irqpri = cpu_priority_high(); |
irqpri = interrupts_disable(); |
spinlock_lock(&printflock); |
while (c = fmt[i++]) { |
400,7 → 400,7 |
out: |
spinlock_unlock(&printflock); |
cpu_priority_restore(irqpri); |
interrupts_restore(irqpri); |
va_end(ap); |
} |
/SPARTAN/trunk/src/lib/func.c |
---|
43,7 → 43,7 |
void halt(void) |
{ |
haltstate = 1; |
cpu_priority_high(); |
interrupts_disable(); |
if (CPU) |
printf("cpu%d: halted\n", CPU->id); |
else |
/SPARTAN/trunk/src/mm/vm.c |
---|
89,13 → 89,13 |
vm_area_t *vm_area_create(vm_t *m, vm_type_t type, size_t size, __address addr) |
{ |
pri_t pri; |
ipl_t ipl; |
vm_area_t *a; |
if (addr % PAGE_SIZE) |
panic("addr not aligned to a page boundary"); |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&m->lock); |
/* |
110,7 → 110,7 |
if (!a->mapping) { |
free(a); |
spinlock_unlock(&m->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return NULL; |
} |
129,7 → 129,7 |
} |
spinlock_unlock(&m->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return a; |
} |
141,9 → 141,9 |
void vm_area_map(vm_area_t *a, vm_t *m) |
{ |
int i, flags; |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&m->lock); |
spinlock_lock(&a->lock); |
165,15 → 165,15 |
spinlock_unlock(&a->lock); |
spinlock_unlock(&m->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
void vm_area_unmap(vm_area_t *a, vm_t *m) |
{ |
int i; |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&m->lock); |
spinlock_lock(&a->lock); |
183,15 → 183,15 |
spinlock_unlock(&a->lock); |
spinlock_unlock(&m->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
void vm_install(vm_t *m) |
{ |
link_t *l; |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
tlb_shootdown_start(); |
spinlock_lock(&m->lock); |
202,7 → 202,7 |
spinlock_unlock(&m->lock); |
tlb_shootdown_finalize(); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
vm_install_arch(m); |
/SPARTAN/trunk/src/mm/frame.c |
---|
68,7 → 68,7 |
*/ |
__address frame_alloc(int flags) |
{ |
pri_t pri; |
ipl_t ipl; |
link_t *cur, *tmp; |
zone_t *z; |
zone_t *zone = NULL; |
76,7 → 76,7 |
__address v; |
loop: |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&zone_head_lock); |
/* |
104,7 → 104,7 |
* TODO: Sleep until frames are available again. |
*/ |
spinlock_unlock(&zone_head_lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
panic("Sleep not implemented.\n"); |
goto loop; |
126,7 → 126,7 |
spinlock_unlock(&zone->lock); |
spinlock_unlock(&zone_head_lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return v; |
} |
141,7 → 141,7 |
*/ |
void frame_free(__address addr) |
{ |
pri_t pri; |
ipl_t ipl; |
link_t *cur; |
zone_t *z; |
zone_t *zone = NULL; |
149,7 → 149,7 |
ASSERT(addr % FRAME_SIZE == 0); |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&zone_head_lock); |
/* |
187,7 → 187,7 |
spinlock_unlock(&zone->lock); |
spinlock_unlock(&zone_head_lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/** Mark frame not free. |
199,7 → 199,7 |
*/ |
void frame_not_free(__address addr) |
{ |
pri_t pri; |
ipl_t ipl; |
link_t *cur; |
zone_t *z; |
zone_t *zone = NULL; |
207,7 → 207,7 |
ASSERT(addr % FRAME_SIZE == 0); |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&zone_head_lock); |
/* |
246,7 → 246,7 |
spinlock_unlock(&zone->lock); |
spinlock_unlock(&zone_head_lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/** Mark frame region not free. |
335,15 → 335,15 |
*/ |
void zone_attach(zone_t *zone) |
{ |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&zone_head_lock); |
list_append(&zone->link, &zone_head); |
spinlock_unlock(&zone_head_lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/** Initialize frame structure |
/SPARTAN/trunk/src/mm/heap.c |
---|
60,7 → 60,7 |
*/ |
void *early_malloc(size_t size) |
{ |
pri_t pri; |
ipl_t ipl; |
chunk_t *x, *y, *z; |
if (size == 0) |
67,7 → 67,7 |
panic("zero-size allocation request"); |
x = chunk0; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&heaplock); |
while (x) { |
if (x->used || x->size < size) { |
84,7 → 84,7 |
*/ |
if (x->size < size + sizeof(chunk_t) + 1) { |
spinlock_unlock(&heaplock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return &x->data[0]; |
} |
105,18 → 105,18 |
x->size = size; |
x->next = y; |
spinlock_unlock(&heaplock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return &x->data[0]; |
} |
spinlock_unlock(&heaplock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return NULL; |
} |
void early_free(void *ptr) |
{ |
pri_t pri; |
ipl_t ipl; |
chunk_t *x, *y, *z; |
if (!ptr) |
127,7 → 127,7 |
if (y->used != 1) |
panic("freeing unused/damaged chunk"); |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&heaplock); |
x = y->prev; |
z = y->next; |
150,5 → 150,5 |
} |
y->used = 0; |
spinlock_unlock(&heaplock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/SPARTAN/trunk/src/time/delay.c |
---|
42,12 → 42,15 |
*/ |
void delay(__u32 usec) |
{ |
pri_t pri; |
ipl_t ipl; |
/* The delay loop is calibrated for each and every |
CPU in the system. Therefore it is necessary to |
cpu_priority_high() before calling the asm_delay_loop(). */ |
pri = cpu_priority_high(); |
/* |
* The delay loop is calibrated for each and every |
* CPU in the system. Therefore it is necessary to |
* call interrupts_disable() before calling the |
* asm_delay_loop(). |
*/ |
ipl = interrupts_disable(); |
asm_delay_loop(usec * CPU->delay_loop_const); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/SPARTAN/trunk/src/time/timeout.c |
---|
100,10 → 100,10 |
{ |
timeout_t *hlp; |
link_t *l, *m; |
pri_t pri; |
ipl_t ipl; |
__u64 sum; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&CPU->timeoutlock); |
spinlock_lock(&t->lock); |
152,7 → 152,7 |
spinlock_unlock(&t->lock); |
spinlock_unlock(&CPU->timeoutlock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
168,19 → 168,19 |
{ |
timeout_t *hlp; |
link_t *l; |
pri_t pri; |
ipl_t ipl; |
grab_locks: |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&t->lock); |
if (!t->cpu) { |
spinlock_unlock(&t->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return false; |
} |
if (!spinlock_trylock(&t->cpu->timeoutlock)) { |
spinlock_unlock(&t->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
goto grab_locks; |
} |
203,6 → 203,6 |
timeout_reinitialize(t); |
spinlock_unlock(&t->lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return true; |
} |
/SPARTAN/trunk/src/time/clock.c |
---|
44,7 → 44,7 |
/** Clock routine |
* |
* Clock routine executed from clock interrupt handler |
* (assuming cpu_priority_high()). Runs expired timeouts |
* (assuming interrupts_disable()'d). Runs expired timeouts |
* and preemptive scheduling. |
* |
*/ |
/SPARTAN/trunk/tools/amd64/gencontext.c |
---|
2,7 → 2,7 |
typedef long long __u64; |
typedef __u64 pri_t; |
typedef __u64 ipl_t; |
#define __amd64_TYPES_H__ |
#include "../../arch/amd64/include/context.h" |
31,7 → 31,7 |
fprintf(f,"#define OFFSET_R13 0x%x\n",((int)&pctx->r13) - (int )pctx); |
fprintf(f,"#define OFFSET_R14 0x%x\n",((int)&pctx->r14) - (int )pctx); |
fprintf(f,"#define OFFSET_R15 0x%x\n",((int)&pctx->r15) - (int )pctx); |
fprintf(f,"#define OFFSET_PRI 0x%x\n",((int)&pctx->pri) - (int )pctx); |
fprintf(f,"#define OFFSET_IPL 0x%x\n",((int)&pctx->ipl) - (int )pctx); |
fclose(f); |
return 0; |
/SPARTAN/trunk/tools/mips32/gencontext.c |
---|
2,7 → 2,7 |
typedef unsigned int __u32; |
typedef __u32 pri_t; |
typedef __u32 ipl_t; |
#define __mips32_TYPES_H__ |
#include "../../arch/mips32/include/context.h" |
/SPARTAN/trunk/arch/ia64/include/types.h |
---|
40,7 → 40,7 |
typedef __u64 __address; |
typedef __u64 pri_t; |
typedef __u64 ipl_t; |
typedef __u64 __native; |
/SPARTAN/trunk/arch/ia64/include/context.h |
---|
95,7 → 95,7 |
*/ |
__u64 pr; |
pri_t pri; |
ipl_t ipl; |
} __attribute__ ((packed)); |
#endif |
/SPARTAN/trunk/arch/ia64/src/context.S |
---|
134,6 → 134,7 |
* Restore application registers |
*/ |
/* TODO: ensure RSE lazy mode */ |
mov ar.bspstore = loc4 |
mov ar.rnat = loc5 |
mov ar.pfs = loc0 |
/SPARTAN/trunk/arch/ia64/src/dummy.s |
---|
35,10 → 35,10 |
.global arch_late_init |
.global cpu_identify |
.global cpu_print_report |
.global cpu_priority_high |
.global cpu_priority_low |
.global cpu_priority_read |
.global cpu_priority_restore |
.global interrupts_disable |
.global interrupts_enable |
.global interrupts_read |
.global interrupts_restore |
.global cpu_sleep |
.global dummy |
.global fpu_enable |
52,10 → 52,10 |
arch_late_init: |
cpu_identify: |
cpu_print_report: |
cpu_priority_high: |
cpu_priority_low: |
cpu_priority_read: |
cpu_priority_restore: |
interrupts_disable: |
interrupts_enable: |
interrupts_read: |
interrupts_restore: |
cpu_sleep: |
fpu_init: |
fpu_enable: |
/SPARTAN/trunk/arch/ppc32/include/types.h |
---|
40,7 → 40,7 |
typedef __u32 __address; |
typedef __u32 pri_t; |
typedef __u32 ipl_t; |
typedef __u32 __native; |
/SPARTAN/trunk/arch/ppc32/include/asm.h |
---|
32,14 → 32,16 |
#include <arch/types.h> |
#include <config.h> |
/** Set priority level low |
/** Enable interrupts. |
* |
* Enable interrupts and return previous |
* value of EE. |
* |
* @return Old interrupt priority level. |
*/ |
static inline pri_t cpu_priority_low(void) { |
pri_t v; |
pri_t tmp; |
static inline ipl_t interrupts_enable(void) { |
ipl_t v; |
ipl_t tmp; |
__asm__ volatile ( |
"mfmsr %0\n" |
51,14 → 53,16 |
return v; |
} |
/** Set priority level high |
/** Disable interrupts. |
* |
* Disable interrupts and return previous |
* value of EE. |
* |
* @return Old interrupt priority level. |
*/ |
static inline pri_t cpu_priority_high(void) { |
pri_t v; |
pri_t tmp; |
static inline ipl_t interrupts_disable(void) { |
ipl_t v; |
ipl_t tmp; |
__asm__ volatile ( |
"mfmsr %0\n" |
70,12 → 74,14 |
return v; |
} |
/** Restore priority level |
/** Restore interrupt priority level. |
* |
* Restore EE. |
* |
* @param ipl Saved interrupt priority level. |
*/ |
static inline void cpu_priority_restore(pri_t pri) { |
pri_t tmp; |
static inline void interrupts_restore(ipl_t ipl) { |
ipl_t tmp; |
__asm__ volatile ( |
"mfmsr %1\n" |
84,17 → 90,19 |
"beq 0f\n" |
"mtmsr %0\n" |
"0:\n" |
: "=r" (pri), "=r" (tmp) |
: "0" (pri) |
: "=r" (ipl), "=r" (tmp) |
: "0" (ipl) |
); |
} |
/** Return raw priority level |
/** Return interrupt priority level. |
* |
* Return EE. |
* |
* @return Current interrupt priority level. |
*/ |
static inline pri_t cpu_priority_read(void) { |
pri_t v; |
static inline ipl_t interrupts_read(void) { |
ipl_t v; |
__asm__ volatile ( |
"mfmsr %0\n" |
: "=r" (v) |
/SPARTAN/trunk/arch/ppc32/include/context.h |
---|
67,7 → 67,7 |
__u32 r30; |
__u32 r31; |
__u32 pc; |
pri_t pri; |
ipl_t ipl; |
} __attribute__ ((packed)); |
#endif |
/SPARTAN/trunk/arch/amd64/include/asm.h |
---|
82,13 → 82,15 |
); |
} |
/** Set priority level low |
/** Enable interrupts. |
* |
* Enable interrupts and return previous |
* value of EFLAGS. |
* |
* @return Old interrupt priority level. |
*/ |
static inline pri_t cpu_priority_low(void) { |
pri_t v; |
static inline ipl_t interrupts_enable(void) { |
ipl_t v; |
__asm__ volatile ( |
"pushfq\n" |
"popq %0\n" |
98,13 → 100,15 |
return v; |
} |
/** Set priority level high |
/** Disable interrupts. |
* |
* Disable interrupts and return previous |
* value of EFLAGS. |
* |
* @return Old interrupt priority level. |
*/ |
static inline pri_t cpu_priority_high(void) { |
pri_t v; |
static inline ipl_t interrupts_disable(void) { |
ipl_t v; |
__asm__ volatile ( |
"pushfq\n" |
"popq %0\n" |
114,24 → 118,28 |
return v; |
} |
/** Restore priority level |
/** Restore interrupt priority level. |
* |
* Restore EFLAGS. |
* |
* @param ipl Saved interrupt priority level. |
*/ |
static inline void cpu_priority_restore(pri_t pri) { |
static inline void interrupts_restore(ipl_t ipl) { |
__asm__ volatile ( |
"pushq %0\n" |
"popfq\n" |
: : "r" (pri) |
: : "r" (ipl) |
); |
} |
/** Return raw priority level |
/** Return interrupt priority level. |
* |
* Return EFLAGS. |
* |
* @return Current interrupt priority level. |
*/ |
static inline pri_t cpu_priority_read(void) { |
pri_t v; |
static inline ipl_t interrupts_read(void) { |
ipl_t v; |
__asm__ volatile ( |
"pushfq\n" |
"popq %0\n" |
/SPARTAN/trunk/arch/amd64/include/context.h |
---|
55,7 → 55,7 |
__u64 r14; |
__u64 r15; |
pri_t pri; |
ipl_t ipl; |
} __attribute__ ((packed)); |
#endif |
/SPARTAN/trunk/arch/amd64/include/types.h |
---|
40,8 → 40,8 |
typedef __u64 __address; |
/* Flags of processor (return value of cpu_priority_high()) */ |
typedef __u64 pri_t; |
/* Flags of processor (return value of interrupts_disable()) */ |
typedef __u64 ipl_t; |
typedef __u64 __native; |
/SPARTAN/trunk/arch/amd64/src/userspace.c |
---|
41,9 → 41,9 |
*/ |
void userspace(void) |
{ |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
__asm__ volatile ("" |
"movq %0, %%rax;" |
57,7 → 57,7 |
"pushq %%rdx;" |
"pushq %%rsi;" |
"iretq;" |
: : "i" (gdtselector(UDATA_DES) | PL_USER), "i" (USTACK_ADDRESS+THREAD_STACK_SIZE), "r" (pri), "i" (gdtselector(UTEXT_DES) | PL_USER), "i" (UTEXT_ADDRESS)); |
: : "i" (gdtselector(UDATA_DES) | PL_USER), "i" (USTACK_ADDRESS+THREAD_STACK_SIZE), "r" (ipl), "i" (gdtselector(UTEXT_DES) | PL_USER), "i" (UTEXT_ADDRESS)); |
/* Unreachable */ |
for(;;); |
/SPARTAN/trunk/arch/amd64/src/interrupt.c |
---|
108,7 → 108,7 |
/* |
* Called directly from the assembler code. |
* CPU is cpu_priority_high(). |
* CPU is interrupts_disable()'d. |
*/ |
void trap_dispatcher(__u8 n, __native stack[]) |
{ |
/SPARTAN/trunk/arch/mips32/include/types.h |
---|
45,7 → 45,7 |
typedef __u32 __address; |
typedef __u32 pri_t; |
typedef __u32 ipl_t; |
typedef __u32 __native; |
/SPARTAN/trunk/arch/mips32/include/context.h |
---|
62,7 → 62,7 |
__u32 s8; |
__u32 gp; |
__u32 pri; |
ipl_t ipl; |
}; |
#endif /* __ASM__ */ |
/SPARTAN/trunk/arch/mips32/src/exception.c |
---|
46,11 → 46,11 |
/* |
* NOTE ON OPERATION ORDERING |
* |
* On entry, cpu_priority_high() must be called before |
* On entry, interrupts_disable() must be called before |
* exception bit is cleared. |
*/ |
cpu_priority_high(); |
interrupts_disable(); |
cp0_status_write(cp0_status_read() & ~ (cp0_status_exl_exception_bit | |
cp0_status_um_bit)); |
/SPARTAN/trunk/arch/mips32/src/mm/asid.c |
---|
44,13 → 44,13 |
*/ |
asid_t asid_get(void) |
{ |
pri_t pri; |
ipl_t ipl; |
int i, j; |
count_t min; |
min = (unsigned) -1; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&asid_usage_lock); |
for (i = ASID_START, j = ASID_START; i < ASIDS; i++) { |
65,7 → 65,7 |
asid_usage[j]++; |
spinlock_unlock(&asid_usage_lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return i; |
} |
78,9 → 78,9 |
*/ |
void asid_put(asid_t asid) |
{ |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&asid_usage_lock); |
ASSERT(asid != ASID_INVALID); |
89,7 → 89,7 |
asid_usage[asid]--; |
spinlock_unlock(&asid_usage_lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/** Find out whether ASID is used by more address spaces |
103,11 → 103,11 |
bool asid_has_conflicts(asid_t asid) |
{ |
bool has_conflicts = false; |
pri_t pri; |
ipl_t ipl; |
ASSERT(asid != ASID_INVALID); |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&asid_usage_lock); |
if (asid_usage[asid] > 1) |
114,7 → 114,7 |
has_conflicts = true; |
spinlock_unlock(&asid_usage_lock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
return has_conflicts; |
} |
/SPARTAN/trunk/arch/mips32/src/mm/tlb.c |
---|
315,12 → 315,12 |
void tlb_invalidate(asid_t asid) |
{ |
entry_hi_t hi; |
pri_t pri; |
ipl_t ipl; |
int i; |
ASSERT(asid != ASID_INVALID); |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
for (i = 0; i < TLB_SIZE; i++) { |
cp0_index_write(i); |
336,7 → 336,7 |
} |
} |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/** Try to find PTE for faulting address |
/SPARTAN/trunk/arch/mips32/src/mm/vm.c |
---|
41,14 → 41,14 |
void vm_install_arch(vm_t *vm) |
{ |
entry_hi_t hi; |
pri_t pri; |
ipl_t ipl; |
hi.value = cp0_entry_hi_read(); |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&vm->lock); |
hi.asid = vm->asid; |
cp0_entry_hi_write(hi.value); |
spinlock_lock(&vm->unlock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/SPARTAN/trunk/arch/mips32/src/interrupt.c |
---|
52,26 → 52,42 |
pstate->ra,rasymbol); |
} |
pri_t cpu_priority_high(void) |
/** Disable interrupts. |
* |
* @return Old interrupt priority level. |
*/ |
ipl_t interrupts_disable(void) |
{ |
pri_t pri = (pri_t) cp0_status_read(); |
cp0_status_write(pri & ~cp0_status_ie_enabled_bit); |
return pri; |
ipl_t ipl = (ipl_t) cp0_status_read(); |
cp0_status_write(ipl & ~cp0_status_ie_enabled_bit); |
return ipl; |
} |
pri_t cpu_priority_low(void) |
/** Enable interrupts. |
* |
* @return Old interrupt priority level. |
*/ |
ipl_t interrupts_enable(void) |
{ |
pri_t pri = (pri_t) cp0_status_read(); |
cp0_status_write(pri | cp0_status_ie_enabled_bit); |
return pri; |
ipl_t ipl = (ipl_t) cp0_status_read(); |
cp0_status_write(ipl | cp0_status_ie_enabled_bit); |
return ipl; |
} |
void cpu_priority_restore(pri_t pri) |
/** Restore interrupt priority level. |
* |
* @param ipl Saved interrupt priority level. |
*/ |
void interrupts_restore(ipl_t ipl) |
{ |
cp0_status_write(cp0_status_read() | (pri & cp0_status_ie_enabled_bit)); |
cp0_status_write(cp0_status_read() | (ipl & cp0_status_ie_enabled_bit)); |
} |
pri_t cpu_priority_read(void) |
/** Read interrupt priority level. |
* |
* @return Current interrupt priority level. |
*/ |
ipl_t interrupts_read(void) |
{ |
return cp0_status_read(); |
} |
/SPARTAN/trunk/arch/mips32/src/drivers/arc.c |
---|
175,11 → 175,11 |
void arc_putchar(char ch) |
{ |
__u32 cnt; |
pri_t pri; |
ipl_t ipl; |
/* TODO: Should be spinlock? */ |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
arc_entry->write(1, &ch, 1, &cnt); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
/SPARTAN/trunk/arch/mips32/src/mips32.c |
---|
52,7 → 52,7 |
void arch_pre_mm_init(void) |
{ |
/* It is not assumed by default */ |
cpu_priority_high(); |
interrupts_disable(); |
init_arc(); |
/SPARTAN/trunk/arch/ia32/include/types.h |
---|
40,7 → 40,7 |
typedef __u32 __address; |
typedef __u32 pri_t; |
typedef __u32 ipl_t; |
typedef __u32 __native; |
/SPARTAN/trunk/arch/ia32/include/asm.h |
---|
131,13 → 131,15 |
*/ |
static inline __u32 inl(__u16 port) { __u32 val; __asm__ volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); return val; } |
/** Set priority level low |
/** Enable interrupts. |
* |
* Enable interrupts and return previous |
* value of EFLAGS. |
* |
* @return Old interrupt priority level. |
*/ |
static inline pri_t cpu_priority_low(void) { |
pri_t v; |
static inline ipl_t interrupts_enable(void) { |
ipl_t v; |
__asm__ volatile ( |
"pushf\n\t" |
"popl %0\n\t" |
147,13 → 149,15 |
return v; |
} |
/** Set priority level high |
/** Disable interrupts. |
* |
* Disable interrupts and return previous |
* value of EFLAGS. |
* |
* @return Old interrupt priority level. |
*/ |
static inline pri_t cpu_priority_high(void) { |
pri_t v; |
static inline ipl_t interrupts_disable(void) { |
ipl_t v; |
__asm__ volatile ( |
"pushf\n\t" |
"popl %0\n\t" |
163,24 → 167,26 |
return v; |
} |
/** Restore priority level |
/** Restore interrupt priority level. |
* |
* Restore EFLAGS. |
* |
* @param ipl Saved interrupt priority level. |
*/ |
static inline void cpu_priority_restore(pri_t pri) { |
static inline void interrupts_restore(ipl_t ipl) { |
__asm__ volatile ( |
"pushl %0\n\t" |
"popf\n" |
: : "r" (pri) |
: : "r" (ipl) |
); |
} |
/** Return raw priority level |
/** Return interrupt priority level. |
* |
* Return EFLAGS. |
* @return EFLAGS. |
*/ |
static inline pri_t cpu_priority_read(void) { |
pri_t v; |
static inline ipl_t interrupts_read(void) { |
ipl_t v; |
__asm__ volatile ( |
"pushf\n\t" |
"popl %0\n" |
/SPARTAN/trunk/arch/ia32/include/context.h |
---|
52,7 → 52,7 |
__u32 esi; |
__u32 edi; |
__u32 ebp; |
__u32 pri; |
ipl_t ipl; |
} __attribute__ ((packed)); |
#endif |
/SPARTAN/trunk/arch/ia32/src/userspace.c |
---|
41,9 → 41,9 |
*/ |
void userspace(void) |
{ |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
__asm__ volatile ( |
/* CLNT */ |
60,7 → 60,7 |
"pushl %4\n" |
"iret" |
: |
: "i" (selector(UDATA_DES) | PL_USER), "r" (USTACK_ADDRESS+(THREAD_STACK_SIZE)), "r" (pri), "i" (selector(UTEXT_DES) | PL_USER), "i" (UTEXT_ADDRESS) |
: "i" (selector(UDATA_DES) | PL_USER), "r" (USTACK_ADDRESS+(THREAD_STACK_SIZE)), "r" (ipl), "i" (selector(UTEXT_DES) | PL_USER), "i" (UTEXT_ADDRESS) |
: "eax"); |
/* Unreachable */ |
/SPARTAN/trunk/arch/ia32/src/interrupt.c |
---|
79,7 → 79,7 |
/* |
* Called directly from the assembler code. |
* CPU is cpu_priority_high(). |
* CPU is interrupts_disable()'d. |
*/ |
void trap_dispatcher(__u8 n, __native stack[]) |
{ |
/SPARTAN/trunk/arch/ia32/src/drivers/ega.c |
---|
80,9 → 80,9 |
void ega_putchar(const char ch) |
{ |
pri_t pri; |
ipl_t ipl; |
pri = cpu_priority_high(); |
ipl = interrupts_disable(); |
spinlock_lock(&egalock); |
switch (ch) { |
101,7 → 101,7 |
ega_move_cursor(); |
spinlock_unlock(&egalock); |
cpu_priority_restore(pri); |
interrupts_restore(ipl); |
} |
void ega_move_cursor(void) |