Subversion Repositories: HelenOS-historic

Compare Revisions

Rev 775 → Rev 776

/kernel/trunk/generic/src/mm/slab.c
85,6 → 85,9
* 'empty-magazine-list', which decreases contention for the single
* per-system magazine cache.
*
* - it might be good to add lock granularity even at the slab level;
* we could then try_spinlock over all partial slabs and thus improve
* scalability even at the slab level (see the sketch after this comment block)
*/
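The lock-granularity idea in the last point could look roughly like the sketch below. This is an illustration only, not part of this revision; the per-slab 'lock' field in slab_t and the spinlock_trylock() primitive are assumptions made for the example.

static slab_t * find_unlocked_partial_slab(slab_cache_t *cache)
{
	link_t *cur;
	slab_t *slab;

	/* The partial list itself would still be guarded by slablock;
	 * only the work on an individual slab moves under its own lock. */
	spinlock_lock(&cache->slablock);
	for (cur = cache->partial_slabs.next;
	     cur != &cache->partial_slabs;
	     cur = cur->next) {
		slab = list_get_instance(cur, slab_t, link);
		if (spinlock_trylock(&slab->lock)) {
			spinlock_unlock(&cache->slablock);
			return slab;	/* Caller takes an object, then releases slab->lock */
		}
	}
	spinlock_unlock(&cache->slablock);
	return NULL;	/* Every partial slab is busy: allocate a new slab instead */
}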
 
 
218,8 → 221,6
/**
* Return object to slab and call a destructor
*
* Assume the cache->lock is held;
*
* @param slab The slab of the object, if the caller knows it directly; otherwise NULL
*
* @return Number of freed pages
234,6 → 235,8
 
ASSERT(slab->cache == cache);
 
spinlock_lock(&cache->slablock);
 
*((int *)obj) = slab->nextavail;
slab->nextavail = (obj - slab->start)/cache->size;
slab->available++;
247,12 → 250,14
if (slab->available == cache->objects) {
/* Free associated memory */
list_remove(&slab->link);
/* Avoid deadlock */
spinlock_unlock(&cache->lock);
/* This should not produce a deadlock, as
* a magazine is always allocated with NO reclaim;
* keep all locks held */
frames = slab_space_free(cache, slab);
spinlock_lock(&cache->lock);
}
 
spinlock_unlock(&cache->slablock);
 
return frames;
}
 
259,8 → 264,6
/**
* Take a new object from a slab, or create a new slab if needed
*
* Assume cache->lock is held.
*
* @return Object address or null
*/
static void * slab_obj_create(slab_cache_t *cache, int flags)
268,6 → 271,8
slab_t *slab;
void *obj;
 
spinlock_lock(&cache->slablock);
 
if (list_empty(&cache->partial_slabs)) {
/* Allow recursion and reclaiming
* - this should work, as the SLAB control structures
275,12 → 280,11
* other than frame_alloc when they are allocating,
* that's why recursion should be at most 1 level deep
*/
spinlock_unlock(&cache->lock);
spinlock_unlock(&cache->slablock);
slab = slab_space_alloc(cache, flags);
spinlock_lock(&cache->lock);
if (!slab) {
return NULL;
}
spinlock_lock(&cache->slablock);
if (!slab)
goto err;
} else {
slab = list_get_instance(cache->partial_slabs.next,
slab_t,
294,7 → 298,12
list_prepend(&slab->link, &cache->full_slabs);
else
list_prepend(&slab->link, &cache->partial_slabs);
 
spinlock_unlock(&cache->slablock);
return obj;
err:
spinlock_unlock(&cache->slablock);
return NULL;
}
 
/**************************************/
303,8 → 312,6
/**
* Free all objects in a magazine and free the memory associated with the magazine
*
* Assume cache->lock is held
*
* @return Number of freed pages
*/
static count_t magazine_destroy(slab_cache_t *cache,
345,9 → 352,9
}
}
/* Local magazines are empty, import one from magazine list */
spinlock_lock(&cache->lock);
spinlock_lock(&cache->maglock);
if (list_empty(&cache->magazines)) {
spinlock_unlock(&cache->lock);
spinlock_unlock(&cache->maglock);
return NULL;
}
newmag = list_get_instance(cache->magazines.next,
354,7 → 361,7
slab_magazine_t,
link);
list_remove(&newmag->link);
spinlock_unlock(&cache->lock);
spinlock_unlock(&cache->maglock);
 
if (lastmag)
slab_free(&mag_cache, lastmag);
431,9 → 438,9
 
/* Flush last to magazine list */
if (lastmag) {
spinlock_lock(&cache->lock);
spinlock_lock(&cache->maglock);
list_prepend(&lastmag->link, &cache->magazines);
spinlock_unlock(&cache->lock);
spinlock_unlock(&cache->maglock);
}
/* Move current to last, save new as current */
cache->mag_cache[CPU->id].last = cmag;
524,13 → 531,14
list_initialize(&cache->full_slabs);
list_initialize(&cache->partial_slabs);
list_initialize(&cache->magazines);
spinlock_initialize(&cache->lock, "cachelock");
spinlock_initialize(&cache->slablock, "slab_lock");
spinlock_initialize(&cache->maglock, "slab_maglock");
if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
for (i=0; i < config.cpu_count; i++) {
memsetb((__address)&cache->mag_cache[i],
sizeof(cache->mag_cache[i]), 0);
spinlock_initialize(&cache->mag_cache[i].lock,
"cpucachelock");
"slab_maglock_cpu");
}
}
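/* For orientation, a hedged sketch of what the three locks initialized above
 * guard under the split locking scheme of this revision (the actual
 * slab_cache_t declaration lives in the slab header and may differ in detail):
 *
 *   spinlock_t slablock;           guards full_slabs and partial_slabs
 *   spinlock_t maglock;            guards the shared 'magazines' list
 *   mag_cache[i].lock (per CPU);   guards that CPU's current/last magazines
 */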
 
594,7 → 602,7
for (i=0; i < config.cpu_count; i++)
spinlock_lock(&cache->mag_cache[i].lock);
}
spinlock_lock(&cache->lock);
spinlock_lock(&cache->maglock);
if (flags & SLAB_RECLAIM_ALL) {
/* Aggressive memfree */
611,6 → 619,11
cache->mag_cache[i].last = NULL;
}
}
/* We can release the cache locks now */
if (flags & SLAB_RECLAIM_ALL) {
for (i=0; i < config.cpu_count; i++)
spinlock_unlock(&cache->mag_cache[i].lock);
}
/* Destroy full magazines */
cur=cache->magazines.prev;
 
626,12 → 639,7
break;
}
spinlock_unlock(&cache->lock);
/* We can release the cache locks now */
if (flags & SLAB_RECLAIM_ALL) {
for (i=0; i < config.cpu_count; i++)
spinlock_unlock(&cache->mag_cache[i].lock);
}
spinlock_unlock(&cache->maglock);
return frames;
}
670,11 → 678,8
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
result = magazine_obj_get(cache);
 
if (!result) {
spinlock_lock(&cache->lock);
if (!result)
result = slab_obj_create(cache, flags);
spinlock_unlock(&cache->lock);
}
 
interrupts_restore(ipl);
 
693,9 → 698,9
 
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
|| magazine_obj_put(cache, obj)) {
spinlock_lock(&cache->lock);
 
slab_obj_destroy(cache, obj, slab);
spinlock_unlock(&cache->lock);
 
}
interrupts_restore(ipl);
atomic_dec(&cache->allocated_objs);
716,6 → 721,10
 
spinlock_lock(&slab_cache_lock);
 
/* TODO: Add an assert that interrupts are disabled, otherwise
* memory allocation from interrupt context can deadlock.
*/
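/* A hedged sketch of the assert this TODO asks for; interrupts_disabled()
 * is a hypothetical query, not a primitive known to exist in this revision:
 *
 *   ASSERT(interrupts_disabled());
 */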
 
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
cache = list_get_instance(cur, slab_cache_t, link);
frames += _slab_reclaim(cache, flags);