Subversion Repositories: HelenOS-historic

Compare Revisions: Rev 782 → Rev 783

/kernel/trunk/generic/include/cpu.h
50,7 → 50,7
 
context_t saved_context;
 
- volatile count_t nrdy;
+ atomic_t nrdy;
runq_t rq[RQ_COUNT];
volatile count_t needs_relink;
 
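The hunk above replaces a spinlock-protected volatile counter with an atomic_t, so nrdy can be read and updated without taking CPU->lock. Below is a minimal sketch of the counter interface the rest of this revision relies on; the real definitions are per-architecture (each port's atomic.h), and the layout and width shown here are assumptions:

    /* Sketch only: assumed layout; HelenOS defines atomic_t per architecture. */
    typedef struct {
        volatile long count;
    } atomic_t;

    static inline long atomic_get(atomic_t *val)
    {
        /* An aligned word-sized load is atomic on the supported CPUs. */
        return val->count;
    }

    /* atomic_inc()/atomic_dec() wrap the architecture's atomic
     * increment/decrement instructions (e.g. lock incl/decl on ia32). */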
/kernel/trunk/generic/src/proc/scheduler.c
119,20 → 119,14
{
thread_t *t;
runq_t *r;
- int i, n;
+ int i;
 
ASSERT(CPU != NULL);
 
loop:
interrupts_disable();
 
- spinlock_lock(&CPU->lock);
- n = CPU->nrdy;
- spinlock_unlock(&CPU->lock);
-
- interrupts_enable();
- if (n == 0) {
+ if (atomic_get(&CPU->nrdy) == 0) {
/*
* Since there was nothing to run, the CPU goes to sleep
* until a hardware interrupt or an IPI comes.
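With the counter atomic, the idle check in find_best_thread() collapses into one lock-free read. A sketch of the resulting wait loop, assuming the surrounding code matches what the hunk shows:

    loop:
        interrupts_disable();

        if (atomic_get(&CPU->nrdy) == 0) {
            /* Nothing to run: sleep until a hardware interrupt
             * or an IPI arrives, then re-check from the top. */
            cpu_sleep();
            goto loop;
        }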
145,7 → 139,6
interrupts_disable();
i = 0;
- retry:
for (; i<RQ_COUNT; i++) {
r = &CPU->rq[i];
spinlock_lock(&r->lock);
157,17 → 150,7
continue;
}
 
- /* avoid deadlock with relink_rq() */
- if (!spinlock_trylock(&CPU->lock)) {
- /*
- * Unlock r and try again.
- */
- spinlock_unlock(&r->lock);
- goto retry;
- }
- CPU->nrdy--;
- spinlock_unlock(&CPU->lock);
-
+ atomic_dec(&CPU->nrdy);
atomic_dec(&nrdy);
r->n--;
 
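The deleted trylock dance was needed only because CPU->nrdy lived under CPU->lock; the original comment points at relink_rq(), which evidently takes CPU->lock in an order that conflicts with holding r->lock here, so the code had to back off and rescan rather than block. With an atomic counter there is no second lock to acquire and the whole retry path disappears. Side by side:

    /* Old: lock ordering forced a trylock with back-off. */
    if (!spinlock_trylock(&CPU->lock)) {
        spinlock_unlock(&r->lock);      /* release r and rescan */
        goto retry;
    }
    CPU->nrdy--;
    spinlock_unlock(&CPU->lock);

    /* New: a lock-free decrement imposes no lock ordering. */
    atomic_dec(&CPU->nrdy);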
464,7 → 447,7
void kcpulb(void *arg)
{
thread_t *t;
- int count, i, j, k = 0;
+ int count, average, i, j, k = 0;
ipl_t ipl;
 
loop:
479,16 → 462,17
* other CPU's. Note that the situation can have changed between two
* passes. Each time, get the most up-to-date counts.
*/
- ipl = interrupts_disable();
- spinlock_lock(&CPU->lock);
- count = atomic_get(&nrdy) / config.cpu_active;
- count -= CPU->nrdy;
- spinlock_unlock(&CPU->lock);
- interrupts_restore(ipl);
+ average = atomic_get(&nrdy) / config.cpu_active;
+ count = average - atomic_get(&CPU->nrdy);
 
- if (count <= 0)
+ if (count < 0)
goto satisfied;
 
if (!count) { /* Try to steal threads from CPU's that have more than average count */
- count = 1;
+ average += 1;
}
 
/*
* Search the lowest priority queues on all CPU's first and the highest priority queues on all CPU's last.
*/
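The rewritten balancing arithmetic reads both counters without locks. A worked example with hypothetical numbers, 2 active CPUs and 7 ready threads system-wide, 3 of them on this CPU:

    average = atomic_get(&nrdy) / config.cpu_active;    /* 7 / 2 == 3 */
    count = average - atomic_get(&CPU->nrdy);           /* 3 - 3 == 0 */

    if (count < 0)
        goto satisfied;     /* above the average: nothing to steal */

    if (!count) {
        /* Exactly at the average: raise the bar so that only CPUs
         * strictly above the average are considered victims. */
        average += 1;
    }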
505,7 → 489,9
* Doesn't require interrupt disabling, because kcpulb is X_WIRED.
*/
if (CPU == cpu)
continue;
+ if (atomic_get(&cpu->nrdy) <= average)
+ continue;
 
restart: ipl = interrupts_disable();
r = &cpu->rq[j];
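The added filter peeks at the victim CPU's counter without taking any of its locks. The read is racy by design: a stale value can only cause a wasted or skipped scan, never an incorrect queue manipulation, because the queues themselves are still inspected under r->lock. In context:

    if (CPU == cpu)
        continue;       /* never steal from ourselves */
    if (atomic_get(&cpu->nrdy) <= average)
        continue;       /* no surplus; staleness is tolerated */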
544,7 → 530,7
interrupts_restore(ipl);
goto restart;
}
- cpu->nrdy--;
+ atomic_dec(&cpu->nrdy);
spinlock_unlock(&cpu->lock);
 
atomic_dec(&nrdy);
566,7 → 552,7
*/
spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, CPU->nrdy, atomic_get(&nrdy) / config.cpu_active);
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%d, avg=%d\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
#endif
t->flags |= X_STOLEN;
spinlock_unlock(&t->lock);
589,7 → 575,7
}
}
 
- if (CPU->nrdy) {
+ if (atomic_get(&CPU->nrdy)) {
/*
* Be a little bit light-weight and let migrated threads run.
*/
629,7 → 615,7
continue;
spinlock_lock(&cpus[cpu].lock);
printf("cpu%d: nrdy: %d needs_relink: %d\n",
- cpus[cpu].id, cpus[cpu].nrdy, cpus[cpu].needs_relink);
+ cpus[cpu].id, atomic_get(&cpus[cpu].nrdy), cpus[cpu].needs_relink);
for (i=0; i<RQ_COUNT; i++) {
r = &cpus[cpu].rq[i];
/kernel/trunk/generic/src/proc/thread.c
137,16 → 137,8
 
atomic_inc(&nrdy);
- avg = atomic_get(&nrdy) / config.cpu_active;
+ atomic_inc(&cpu->nrdy);
 
- spinlock_lock(&cpu->lock);
- if ((++cpu->nrdy) > avg) {
- /*
- * If there are idle halted CPU's, this will wake them up.
- */
- ipi_broadcast(VECTOR_WAKEUP_IPI);
- }
- spinlock_unlock(&cpu->lock);
 
interrupts_restore(ipl);
}
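In thread_ready(), both counters are now bumped lock-free and the conditional VECTOR_WAKEUP_IPI broadcast is gone: as this diff suggests, an idle CPU now notices new work by re-reading the atomic counter in its find_best_thread() loop rather than waiting to be kicked. The accounting reduces to:

    atomic_inc(&nrdy);          /* system-wide ready-thread count */
    atomic_inc(&cpu->nrdy);     /* ready count of the target CPU */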
 
/kernel/trunk/generic/src/mm/slab.c
252,8 → 252,8
/* It was in full, move to partial */
list_remove(&slab->link);
list_prepend(&slab->link, &cache->partial_slabs);
- spinlock_unlock(&cache->slablock);
}
+ spinlock_unlock(&cache->slablock);
return 0;
}
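The hunk above restores lock balance: previously slablock was released only on the branch that moved the slab from the full to the partial list, so, as the diff suggests, the other path could leave the lock held. A sketch of the rule the fix enforces; the moved_from_full condition is hypothetical shorthand for the surrounding logic:

    spinlock_lock(&cache->slablock);
    if (moved_from_full) {                  /* hypothetical condition */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    spinlock_unlock(&cache->slablock);      /* released on every path */
    return 0;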
 
536,6 → 536,7
{
int i;
int pages;
+ ipl_t ipl;
 
memsetb((__address)cache, sizeof(*cache), 0);
cache->name = name;
580,11 → 581,14
if (badness(cache) > sizeof(slab_t))
cache->flags |= SLAB_CACHE_SLINSIDE;
 
+ /* Add cache to cache list */
+ ipl = interrupts_disable();
spinlock_lock(&slab_cache_lock);
 
list_append(&cache->link, &slab_cache_list);
 
spinlock_unlock(&slab_cache_lock);
+ interrupts_restore(ipl);
}
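This hunk, and the slab_print_list() one below, wrap slab_cache_lock in an interrupts_disable()/interrupts_restore() pair. That guards against self-deadlock: if an interrupt arrives while the lock is held on this CPU and its handler tries to take the same lock, the CPU spins forever. The shape of the idiom:

    ipl_t ipl;

    ipl = interrupts_disable();             /* remember the previous level */
    spinlock_lock(&slab_cache_lock);

    /* ... walk or modify slab_cache_list ... */

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);                /* restore, don't blindly enable */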
 
/** Create slab cache */
758,7 → 762,9
{
slab_cache_t *cache;
link_t *cur;
 
+ ipl_t ipl;
+ ipl = interrupts_disable();
spinlock_lock(&slab_cache_lock);
printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
771,6 → 777,7
cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
}
spinlock_unlock(&slab_cache_lock);
+ interrupts_restore(ipl);
}
 
#ifdef CONFIG_DEBUG