Subversion Repositories HelenOS-historic

Compare Revisions

Rev 767 → Rev 768

/kernel/trunk/test/mm/slab1/test.c
32,6 → 32,7
#include <proc/thread.h>
#include <arch.h>
#include <panic.h>
#include <memstr.h>
 
#define VAL_COUNT 1024
 
50,6 → 51,7
printf("Allocating %d items...", count);
for (i=0; i < count; i++) {
data[i] = slab_alloc(cache, 0);
memsetb((__address)data[i], size, 0);
}
printf("done.\n");
printf("Freeing %d items...", count);
61,6 → 63,7
printf("Allocating %d items...", count);
for (i=0; i < count; i++) {
data[i] = slab_alloc(cache, 0);
memsetb((__address)data[i], size, 0);
}
printf("done.\n");
 
74,6 → 77,7
printf("Allocating %d items...", count/2);
for (i=count/2; i < count; i++) {
data[i] = slab_alloc(cache, 0);
memsetb((__address)data[i], size, 0);
}
printf("done.\n");
printf("Freeing %d items...", count);
/kernel/trunk/test/mm/slab2/test.c
33,6 → 33,7
#include <arch.h>
#include <panic.h>
#include <mm/frame.h>
#include <memstr.h>
 
#define ITEM_SIZE 256
 
64,7 → 65,8
slab_free(cache2,data2);
break;
}
 
memsetb((__address)data1, ITEM_SIZE, 0);
memsetb((__address)data2, ITEM_SIZE, 0);
*((void **)data1) = olddata1;
*((void **)data2) = olddata2;
olddata1 = data1;
88,6 → 90,7
if (!data1) {
panic("Incorrect memory size - use another test.");
}
memsetb((__address)data1, ITEM_SIZE, 0);
*((void **)data1) = olddata1;
olddata1 = data1;
}
97,14 → 100,28
if (!data1) {
break;
}
memsetb((__address)data1, ITEM_SIZE, 0);
*((void **)data1) = olddata1;
olddata1 = data1;
}
slab_print_list();
printf("Deallocating cache1...");
while (olddata1) {
data1 = *((void **)olddata1);
slab_free(cache1, olddata1);
olddata1 = data1;
}
printf("done.\n");
slab_print_list();
slab_cache_destroy(cache1);
slab_cache_destroy(cache2);
}
 
void test(void)
{
printf("Running reclaim test .. pass1\n");
totalmemtest();
printf("Running reclaim test .. pass2\n");
totalmemtest();
printf("Reclaim test OK.\n");
}
/kernel/trunk/generic/src/mm/slab.c
75,7 → 75,7
if (status != FRAME_OK) {
return NULL;
}
if (! cache->flags & SLAB_CACHE_SLINSIDE) {
if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
slab = malloc(sizeof(*slab)); // , flags);
if (!slab) {
frame_free((__address)data);
102,7 → 102,6
*((int *) (slab->start + i*cache->size)) = i+1;
 
atomic_inc(&cache->allocated_slabs);
 
return slab;
}
 
114,7 → 113,7
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
frame_free((__address)slab->start);
if (! cache->flags & SLAB_CACHE_SLINSIDE)
if (! (cache->flags & SLAB_CACHE_SLINSIDE))
free(slab);
 
atomic_dec(&cache->allocated_slabs);
277,6 → 276,7
}
/* Free current magazine and take one from list */
slab_free(&mag_cache, mag);
 
mag = list_get_instance(cache->magazines.next,
slab_magazine_t,
link);
296,7 → 296,8
}
 
/**
* Put object into CPU-cache magazine
* Assure that the current magazine is empty, return pointer to it, or NULL if
* no empty magazine available and cannot be allocated
*
* We have 2 magazines bound to processor.
* First try the current.
304,6 → 305,46
* If full, put to magazines list.
* allocate new, exchange last & current
*
*/
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
slab_magazine_t *cmag,*lastmag,*newmag;
 
cmag = cache->mag_cache[CPU->id].current;
lastmag = cache->mag_cache[CPU->id].last;
 
if (cmag) {
if (cmag->busy < cmag->size)
return cmag;
if (lastmag && lastmag->busy < lastmag->size) {
cache->mag_cache[CPU->id].last = cmag;
cache->mag_cache[CPU->id].current = lastmag;
return lastmag;
}
}
/* current | last are full | nonexistent, allocate new */
/* We do not want to sleep just because of caching */
/* Especially we do not want reclaiming to start, as
* this would deadlock */
newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
if (!newmag)
return NULL;
newmag->size = SLAB_MAG_SIZE;
newmag->busy = 0;
 
/* Flush last to magazine list */
if (lastmag)
list_prepend(&lastmag->link, &cache->magazines);
/* Move current as last, save new as current */
cache->mag_cache[CPU->id].last = cmag;
cache->mag_cache[CPU->id].current = newmag;
 
return newmag;
}
 
/**
* Put object into CPU-cache magazine
*
* @return 0 - success, -1 - could not get memory
*/
static int magazine_obj_put(slab_cache_t *cache, void *obj)
311,38 → 352,11
slab_magazine_t *mag;
 
spinlock_lock(&cache->mag_cache[CPU->id].lock);
 
mag = make_empty_current_mag(cache);
if (!mag)
goto errout;
mag = cache->mag_cache[CPU->id].current;
if (!mag) {
/* We do not want to sleep just because of caching */
/* Especially we do not want reclaiming to start, as
* this would deadlock */
mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
if (!mag) /* Allocation failed, give up on caching */
goto errout;
 
cache->mag_cache[CPU->id].current = mag;
mag->size = SLAB_MAG_SIZE;
mag->busy = 0;
} else if (mag->busy == mag->size) {
/* If the last is full | empty, allocate new */
mag = cache->mag_cache[CPU->id].last;
if (!mag || mag->size == mag->busy) {
if (mag)
list_prepend(&mag->link, &cache->magazines);
 
mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
if (!mag)
goto errout;
mag->size = SLAB_MAG_SIZE;
mag->busy = 0;
cache->mag_cache[CPU->id].last = mag;
}
/* Exchange the 2 */
cache->mag_cache[CPU->id].last = cache->mag_cache[CPU->id].current;
cache->mag_cache[CPU->id].current = mag;
}
mag->objs[mag->busy++] = obj;
 
spinlock_unlock(&cache->mag_cache[CPU->id].lock);
408,7 → 422,7
list_initialize(&cache->partial_slabs);
list_initialize(&cache->magazines);
spinlock_initialize(&cache->lock, "cachelock");
if (! cache->flags & SLAB_CACHE_NOMAGAZINE) {
if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
for (i=0; i< config.cpu_count; i++)
spinlock_initialize(&cache->mag_cache[i].lock,
"cpucachelock");
457,8 → 471,6
*
* @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
* @return Number of freed pages
*
* TODO: Add light reclaim
*/
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
493,12 → 505,11
/* Destroy full magazines */
cur=cache->magazines.prev;
 
while (cur!=&cache->magazines) {
while (cur != &cache->magazines) {
mag = list_get_instance(cur, slab_magazine_t, link);
cur = cur->prev;
list_remove(cur->next);
// list_remove(&mag->link);
list_remove(&mag->link);
frames += magazine_destroy(cache,mag);
/* If we do not do full reclaim, break
* as soon as something is freed */
544,7 → 555,7
/* Disable interrupts to avoid deadlocks with interrupt handlers */
ipl = interrupts_disable();
if (!cache->flags & SLAB_CACHE_NOMAGAZINE)
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
result = magazine_obj_get(cache);
 
if (!result) {
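
The bulk of the slab.c change is a refactor of the per-CPU put path: the logic that decides whether the current magazine still has room, whether it can be swapped with the last one, or whether a fresh magazine must be allocated now lives in make_empty_current_mag(), and magazine_obj_put() shrinks to one call plus one store. Fresh magazines are allocated with FRAME_ATOMIC | FRAME_NO_RECLAIM precisely so that caching an object can never trigger reclaim and deadlock. For callers only the contract matters; a sketch of how a free path might use it (the fallback helper is hypothetical, only the magazine_obj_put() return contract comes from this file):

static void free_object(slab_cache_t *cache, void *obj)
{
	/* Fast path: park the object in the per-CPU magazine */
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)
	    && magazine_obj_put(cache, obj) == 0)
		return;

	/* Slow path: no magazine could be obtained, hand the object back to
	 * its slab (return_to_slab() is a stand-in, not part of this change) */
	return_to_slab(cache, obj);
}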
/kernel/trunk/generic/src/mm/frame.c
115,6 → 115,7
link_t *tmp;
zone_t *zone = NULL;
frame_t *frame = NULL;
int freed;
__address v;
loop:
135,9 → 136,20
zone = find_free_zone(order);
/* If no memory, reclaim some slab memory,
if it does not help, reclaim all */
if (!zone && !(flags & FRAME_NO_RECLAIM))
if (slab_reclaim(0) || slab_reclaim(SLAB_RECLAIM_ALL))
if (!zone && !(flags & FRAME_NO_RECLAIM)) {
spinlock_unlock(&zone_head_lock);
freed = slab_reclaim(0);
spinlock_lock(&zone_head_lock);
if (freed)
zone = find_free_zone(order);
if (!zone) {
spinlock_unlock(&zone_head_lock);
freed = slab_reclaim(SLAB_RECLAIM_ALL);
spinlock_lock(&zone_head_lock);
if (freed)
zone = find_free_zone(order);
}
}
}
 
if (!zone) {
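
The frame.c hunk is easier to follow untangled from the removed lines: frame allocation now drops zone_head_lock around each slab_reclaim() call and only afterwards re-checks for a free zone. The change itself does not say why, but presumably reclaiming slab memory ends up in frame_free(), which needs zone_head_lock too, so holding the spinlock across the call would deadlock. Read from the new lines only, the retry logic is roughly:

	zone = find_free_zone(order);
	if (!zone && !(flags & FRAME_NO_RECLAIM)) {
		/* Light reclaim first, with the zone lock released */
		spinlock_unlock(&zone_head_lock);
		freed = slab_reclaim(0);
		spinlock_lock(&zone_head_lock);
		if (freed)
			zone = find_free_zone(order);

		/* Still nothing: one aggressive pass */
		if (!zone) {
			spinlock_unlock(&zone_head_lock);
			freed = slab_reclaim(SLAB_RECLAIM_ALL);
			spinlock_lock(&zone_head_lock);
			if (freed)
				zone = find_free_zone(order);
		}
	}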