Subversion Repositories HelenOS-historic

Compare Revisions

Rev 121 → Rev 125

/SPARTAN/trunk/src/main/kinit.c
94,7 → 94,7
 
#ifdef __SMP__
if (config.cpu_count > 1) {
    /*
     * For each CPU, create its load balancing thread.
     */
    for (i = 0; i < config.cpu_count; i++) {
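
For context, this loop gives every detected processor its own load-balancing thread. A minimal user-space analogue of the one-thread-per-CPU pattern, sketched with POSIX threads (balancer() and the cpu_count value are stand-ins for this example, not HelenOS code):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Placeholder for the per-CPU balancing work. */
static void *balancer(void *arg)
{
    long cpu = (long) arg;
    printf("balancer for cpu %ld started\n", cpu);
    return NULL;
}

int main(void)
{
    long cpu_count = 4;    /* stands in for config.cpu_count */
    pthread_t *t = malloc(cpu_count * sizeof(pthread_t));
    long i;

    if (!t)
        return 1;

    /* One balancing thread per CPU, mirroring the kinit loop. */
    for (i = 0; i < cpu_count; i++)
        pthread_create(&t[i], NULL, balancer, (void *) i);

    for (i = 0; i < cpu_count; i++)
        pthread_join(t[i], NULL);

    free(t);
    return 0;
}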
/SPARTAN/trunk/src/cpu/cpu.c
66,7 → 66,7
/* initialize everything */
memsetb((__address) cpu_private_data, sizeof(cpu_private_data_t) * config.cpu_count, 0);
memsetb((__address) cpus, sizeof(cpu_t) * config.cpu_count, 0);
 
for (i=0; i < config.cpu_count; i++) {
    cpus[i].stack = (__u8 *) malloc(CPU_STACK_SIZE);
    if (!cpus[i].stack)
/SPARTAN/trunk/src/time/timeout.c
105,7 → 105,7
pri = cpu_priority_high();
spinlock_lock(&CPU->timeoutlock);
spinlock_lock(&t->lock);
 
if (t->cpu)
    panic("t->cpu != 0");
 
114,7 → 114,7
t->handler = f;
t->arg = arg;
 
/*
* Insert t into the active timeouts list according to t->ticks.
*/
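
The insertion the comment describes keeps the CPU's active timeouts list ordered by expiration time, presumably so the clock tick only needs to look at the head. A self-contained sketch of that kind of ordered insert into a singly linked list (timeout_t and its fields are invented for this example, not the kernel's types):

#include <stdio.h>

typedef struct timeout {
    unsigned long ticks;        /* expiration time, absolute here for simplicity */
    struct timeout *next;
} timeout_t;

/* Insert t into *head, keeping the list sorted by ascending ticks. */
static void timeout_insert(timeout_t **head, timeout_t *t)
{
    while (*head && (*head)->ticks <= t->ticks)
        head = &(*head)->next;
    t->next = *head;
    *head = t;
}

int main(void)
{
    timeout_t a = { 30, NULL }, b = { 10, NULL }, c = { 20, NULL };
    timeout_t *head = NULL, *it;

    timeout_insert(&head, &a);
    timeout_insert(&head, &b);
    timeout_insert(&head, &c);

    for (it = head; it; it = it->next)
        printf("%lu\n", it->ticks);    /* prints 10, 20, 30 */
    return 0;
}

If the kernel keeps relative deltas rather than absolute times, the walk would also subtract as it goes; the ordering idea is the same.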
/SPARTAN/trunk/src/time/clock.c
89,7 → 89,7
spinlock_unlock(&CPU->lock);
spinlock_lock(&THREAD->lock);
if (!THREAD->ticks--) {
    spinlock_unlock(&THREAD->lock);
    scheduler();
}
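
Note the post-decrement idiom: !THREAD->ticks-- tests the old value, so scheduler() is entered on the tick at which ticks has already reached zero. A tiny standalone demonstration of when the test fires:

#include <stdio.h>

int main(void)
{
    int ticks = 2;

    while (ticks >= 0) {
        /* !ticks-- tests the current value, then decrements. */
        if (!ticks--) {
            printf("quantum expired; would call scheduler()\n");
            break;
        }
        printf("still running, ticks now %d\n", ticks);
    }
    return 0;
}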
/SPARTAN/trunk/src/proc/scheduler.c
102,7 → 102,7
* set CPU-private flag that the kcpulb has been started.
*/
if (test_and_set(&CPU->kcpulbstarted) == 0) {
    waitq_wakeup(&CPU->kcpulb_wq, 0);
    goto loop;
}
#endif /* __SMP__ */
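
test_and_set() atomically sets the flag and returns its previous value, so exactly one contender performs the wakeup while the rest skip it. A rough C11 analogue of the guard using atomic_flag (the kernel's primitive is architecture-specific; wake_balancer_once() is invented for this sketch):

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag kcpulbstarted = ATOMIC_FLAG_INIT;

/* Only the first caller sees the flag clear and does the wakeup. */
static int wake_balancer_once(void)
{
    if (!atomic_flag_test_and_set(&kcpulbstarted)) {
        printf("waking kcpulb\n");    /* stands in for waitq_wakeup() */
        return 1;
    }
    return 0;
}

int main(void)
{
    printf("%d\n", wake_balancer_once());    /* 1: flag was clear */
    printf("%d\n", wake_balancer_once());    /* 0: already started */
    return 0;
}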
238,7 → 238,7
* This is the place where threads leave scheduler();
*/
before_thread_runs();
spinlock_unlock(&THREAD->lock);
cpu_priority_restore(THREAD->saved_context.pri);
return;
}
278,74 → 278,73
if (THREAD) {
    switch (THREAD->state) {
        case Running:
            THREAD->state = Ready;
            spinlock_unlock(&THREAD->lock);
            thread_ready(THREAD);
            break;

        case Exiting:
            frame_free((__address) THREAD->kstack);
            if (THREAD->ustack) {
                frame_free((__address) THREAD->ustack);
            }

            /*
             * Detach from the containing task.
             */
            spinlock_lock(&TASK->lock);
            list_remove(&THREAD->th_link);
            spinlock_unlock(&TASK->lock);

            spinlock_unlock(&THREAD->lock);
            spinlock_lock(&threads_lock);
            list_remove(&THREAD->threads_link);
            spinlock_unlock(&threads_lock);

            spinlock_lock(&CPU->lock);
            if(CPU->fpu_owner==THREAD) CPU->fpu_owner=NULL;
            spinlock_unlock(&CPU->lock);

            free(THREAD);

            break;

        case Sleeping:
            /*
             * Prefer the thread after it's woken up.
             */
            THREAD->pri = -1;

            /*
             * We need to release wq->lock which we locked in waitq_sleep().
             * Address of wq->lock is kept in THREAD->sleep_queue.
             */
            spinlock_unlock(&THREAD->sleep_queue->lock);

            /*
             * Check for possible requests for out-of-context invocation.
             */
            if (THREAD->call_me) {
                THREAD->call_me(THREAD->call_me_with);
                THREAD->call_me = NULL;
                THREAD->call_me_with = NULL;
            }

            spinlock_unlock(&THREAD->lock);

            break;

        default:
            /*
             * Entering state is unexpected.
             */
            panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
            break;
    }
    THREAD = NULL;
}
 
THREAD = find_best_thread();
spinlock_lock(&THREAD->lock);
469,9 → 468,9
while (l != &r->rq_head) {
    t = list_get_instance(l, thread_t, rq_link);
    /*
     * We don't want to steal CPU-wired threads neither threads already stolen.
     * The latter prevents threads from migrating between CPU's without ever being run.
     * We don't want to steal threads whose FPU context is still in CPU.
     */
    spinlock_lock(&t->lock);
    if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
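
The test admits only threads that are neither wired to their CPU nor already stolen once, and whose FPU context does not still live in the CPU's registers. A standalone sketch of the same mask-based eligibility check (the flag values and struct are illustrative, not the kernel's definitions):

#include <stdio.h>

#define X_WIRED     (1 << 0)    /* illustrative values */
#define X_STOLEN    (1 << 1)

typedef struct {
    int flags;
    int fpu_context_engaged;
} thread_t;

/* Mirrors the eligibility condition in the load balancer. */
static int stealable(const thread_t *t)
{
    return !(t->flags & (X_WIRED | X_STOLEN)) && !t->fpu_context_engaged;
}

int main(void)
{
    thread_t plain = { 0, 0 }, wired = { X_WIRED, 0 }, fpu = { 0, 1 };

    printf("%d %d %d\n", stealable(&plain), stealable(&wired), stealable(&fpu));    /* 1 0 0 */
    return 0;
}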
497,7 → 496,7
 
atomic_dec(&nrdy);
 
r->n--;
list_remove(&t->rq_link);
 
break;
527,7 → 526,7
goto satisfied;
/*
 * We are not satisfied yet, focus on another CPU next time.
 */
k++;
552,7 → 551,7
}
goto not_satisfied;
 
satisfied:
/*
* Tell find_best_thread() to wake us up later again.
/SPARTAN/trunk/src/proc/thread.c
146,7 → 146,7
ipi_broadcast(VECTOR_WAKEUP_IPI);
}
spinlock_unlock(&cpu->lock);
 
cpu_priority_restore(pri);
}
 
277,7 → 277,7
*/
void thread_sleep(__u32 sec)
{
    thread_usleep(sec*1000000);
}
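
One caveat worth noting: sec is a __u32, so sec*1000000 wraps for sleeps of roughly 4295 seconds and longer. A purely illustrative guard, not present in the source, could split long sleeps into chunks that cannot overflow:

#include <inttypes.h>
#include <stdio.h>

/* Hypothetical wrapper; printf stands in for thread_usleep(). */
static void safe_sleep(uint32_t sec)
{
    const uint32_t max_chunk = UINT32_MAX / 1000000;    /* 4294 s */

    while (sec > max_chunk) {
        printf("thread_usleep(%" PRIu32 ")\n", max_chunk * 1000000);
        sec -= max_chunk;
    }
    printf("thread_usleep(%" PRIu32 ")\n", sec * 1000000);
}

int main(void)
{
    safe_sleep(10000);    /* two full chunks plus a remainder */
    return 0;
}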
 
 
/SPARTAN/trunk/src/mm/frame.c
54,23 → 54,23
 
void frame_init(void)
{
    if (config.cpu_active == 1) {

        /*
         * The bootstrap processor will allocate all necessary memory for frame allocation.
         */

        frames = config.memory_size / FRAME_SIZE;
        frame_bitmap_octets = frames / 8 + (frames % 8 > 0);
        frame_bitmap = (__u8 *) malloc(frame_bitmap_octets);
        if (!frame_bitmap)
            panic("malloc/frame_bitmap\n");

        /*
         * Mark all frames free.
         */
        memsetb((__address) frame_bitmap, frame_bitmap_octets, 0);
        frames_free = frames;
    }
 
/*
80,10 → 80,10
frame_arch_init();
 
if (config.cpu_active == 1) {
    /*
     * Create the memory address space map. Marked frames and frame
     * regions cannot be used for allocation.
     */
    frame_region_not_free(config.base, config.base + config.kernel_size);
}
}
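
In frame_init() above, frames / 8 + (frames % 8 > 0) rounds the bitmap size up so a partial last octet still gets a whole byte, and zeroing the bitmap marks every frame free. A self-contained sketch of the same one-bit-per-frame bookkeeping (names and the used/free encoding are chosen for this example):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    size_t frames = 1000;
    /* One bit per frame, rounded up to whole octets. */
    size_t octets = frames / 8 + (frames % 8 > 0);
    unsigned char *bitmap = malloc(octets);

    if (!bitmap)
        return 1;

    memset(bitmap, 0, octets);    /* 0 = free, as after frame_init() */

    /* Mark frame 42 used, then query it. */
    bitmap[42 / 8] |= 1 << (42 % 8);
    printf("frame 42 used: %d\n", (bitmap[42 / 8] >> (42 % 8)) & 1);

    free(bitmap);
    return 0;
}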
/SPARTAN/trunk/src/mm/page.c
53,11 → 53,11
int i, cnt, length;
 
/* TODO: implement portable way of computing page address from address */
length = size + (s - (s & 0xfffff000));
cnt = length/PAGE_SIZE + (length%PAGE_SIZE>0);

for (i = 0; i < cnt; i++)
    map_page_to_frame(s + i*PAGE_SIZE, s + i*PAGE_SIZE, PAGE_NOT_CACHEABLE, 0);
 
}
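
The two lines above compute how many pages the region [s, s+size) touches: length adds s's offset within its page, and cnt rounds the result up to whole pages. One portable variant of the computation the TODO asks for, assuming PAGE_SIZE is a power of two (ALIGN_DOWN is a helper defined just for this sketch):

#include <stdio.h>

#define PAGE_SIZE    4096UL
#define ALIGN_DOWN(a)    ((a) & ~(PAGE_SIZE - 1))    /* local helper */

int main(void)
{
    unsigned long s = 0x12345678UL, size = 10000UL;

    /* Offset of s within its page, plus the requested size... */
    unsigned long length = size + (s - ALIGN_DOWN(s));
    /* ...rounded up to whole pages. */
    unsigned long cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);

    printf("pages to map: %lu\n", cnt);    /* 3 */
    return 0;
}

Deriving the mask from PAGE_SIZE avoids the hard-coded 0xfffff000, which bakes in 4 KiB pages and 32-bit addresses.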
 
73,38 → 73,38
*/
void map_page_to_frame(__address page, __address frame, int flags, __address root)
{
    pte_t *ptl0, *ptl1, *ptl2, *ptl3;
    __address newpt;

    ptl0 = (pte_t *) PA2KA(root ? root : (__address) GET_PTL0_ADDRESS());

    if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
        newpt = frame_alloc(FRAME_KA);
        memsetb(newpt, PAGE_SIZE, 0);
        SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
        SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER);
    }

    ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

    if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
        newpt = frame_alloc(FRAME_KA);
        memsetb(newpt, PAGE_SIZE, 0);
        SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
        SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER);
    }

    ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

    if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
        newpt = frame_alloc(FRAME_KA);
        memsetb(newpt, PAGE_SIZE, 0);
        SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
        SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER);
    }

    ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

    SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
    SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags);
}
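
Each level of the walk above allocates a missing page table, hooks it into the parent entry, and descends; the PTL0 through PTL3 indexes are fixed bit-fields of the virtual address. A standalone sketch of index extraction for a hypothetical 4-level layout with 512-entry tables (the shift amounts are illustrative; the real PTL*_INDEX macros are defined per architecture):

#include <stdio.h>

/* Hypothetical layout: 9 index bits per level, 12-bit page offset. */
#define PTL0_INDEX(va)    (((va) >> 39) & 0x1ffULL)
#define PTL1_INDEX(va)    (((va) >> 30) & 0x1ffULL)
#define PTL2_INDEX(va)    (((va) >> 21) & 0x1ffULL)
#define PTL3_INDEX(va)    (((va) >> 12) & 0x1ffULL)

int main(void)
{
    unsigned long long page = 0x00007f1234567000ULL;

    printf("ptl0=%llu ptl1=%llu ptl2=%llu ptl3=%llu\n",
        PTL0_INDEX(page), PTL1_INDEX(page),
        PTL2_INDEX(page), PTL3_INDEX(page));
    return 0;
}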