Subversion Repositories HelenOS-historic

Compare Revisions

Ignore whitespace — Rev 766 → Rev 767

/kernel/trunk/test/mm/slab2/test.c
0,0 → 1,110
/*
* Copyright (C) 2006 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <test.h>
#include <mm/slab.h>
#include <print.h>
#include <proc/thread.h>
#include <arch.h>
#include <panic.h>
#include <mm/frame.h>
 
#define ITEM_SIZE 256
 
/** Exhaust memory with two competing caches, then verify reclaim.
 *
 * Objects from both caches are allocated (atomically, so an allocation
 * failure marks the end of free memory) until no more fit, then every
 * object of the second cache is released again.  The freed objects end
 * up in the per-CPU magazines; a subsequent burst of allocations from
 * the first cache must force the allocator to flush those magazines
 * and succeed.
 *
 * Each allocated object stores a pointer to the previously allocated
 * one, forming an intrusive singly linked list (assumes ITEM_SIZE can
 * hold at least a void pointer).
 */
static void totalmemtest(void)
{
	slab_cache_t *ca = slab_cache_create("cache1_tst", ITEM_SIZE, 0, NULL, NULL, 0);
	slab_cache_t *cb = slab_cache_create("cache2_tst", ITEM_SIZE, 0, NULL, NULL, 0);
	void *head_a = NULL;	/* intrusive list of live objects from ca */
	void *head_b = NULL;	/* intrusive list of live objects from cb */
	void *obj_a, *obj_b;
	int count;

	printf("Allocating...");
	/* FRAME_ATOMIC means no blocking/reclaim, so NULL = "memory full". */
	while (1) {
		obj_a = slab_alloc(ca, FRAME_ATOMIC);
		obj_b = slab_alloc(cb, FRAME_ATOMIC);
		if (!obj_a || !obj_b) {
			/* Return the odd one out from an uneven last round. */
			if (obj_a)
				slab_free(ca, obj_a);
			if (obj_b)
				slab_free(cb, obj_b);
			break;
		}
		/* Prepend each new object to its cache's intrusive list. */
		*((void **) obj_a) = head_a;
		*((void **) obj_b) = head_b;
		head_a = obj_a;
		head_b = obj_b;
	}
	printf("done.\n");
	slab_print_list();

	/* Memory is now full - give everything from the second cache back.
	 * The freed objects should land in the magazines. */
	printf("Deallocating cache2...");
	while (head_b) {
		obj_b = *((void **) head_b);
		slab_free(cb, head_b);
		head_b = obj_b;
	}
	printf("done.\n");
	slab_print_list();

	/* These 30 must succeed: reclaim has to drain the cached magazines. */
	printf("Allocating to cache1...\n");
	for (count = 0; count < 30; count++) {
		obj_a = slab_alloc(ca, FRAME_ATOMIC);
		if (!obj_a) {
			panic("Incorrect memory size - use another test.");
		}
		*((void **) obj_a) = head_a;
		head_a = obj_a;
	}
	slab_print_list();
	/* Keep allocating until memory is genuinely exhausted again. */
	while ((obj_a = slab_alloc(ca, FRAME_ATOMIC)) != NULL) {
		*((void **) obj_a) = head_a;
		head_a = obj_a;
	}
	slab_print_list();
}
 
/** Test entry point: run the two-cache total-memory reclaim test. */
void test(void)
{
	totalmemtest();
}
/kernel/trunk/kernel.config
80,6 → 80,7
@ "mm/mapping1" Mapping test 1
@ "mm/falloc1" Frame Allocation test 1
@ "mm/falloc2" Frame Allocation test 2
@ "mm/slab1" SLAB Allocator test 1
@ "mm/slab1" SLAB test1 - No CPU-cache
@ "mm/slab2" SLAB test2 - SMP CPU cache
@ [ARCH=mips32] "debug/mips1" Mips breakpoint-debug test
! CONFIG_TEST (choice)
/kernel/trunk/generic/include/mm/slab.h
75,6 → 75,7
/* Statistics */
atomic_t allocated_slabs;
atomic_t allocated_objs;
atomic_t cached_objs;
 
/* Slabs */
link_t full_slabs; /**< List of full slabs */
/kernel/trunk/generic/src/mm/slab.c
96,6 → 96,7
slab->start = data;
slab->available = cache->objects;
slab->nextavail = 0;
slab->cache = cache;
 
for (i=0; i<cache->objects;i++)
*((int *) (slab->start + i*cache->size)) = i+1;
151,6 → 152,8
if (!slab)
slab = obj2slab(obj);
 
ASSERT(slab->cache == cache);
 
*((int *)obj) = slab->nextavail;
slab->nextavail = (obj - slab->start)/cache->size;
slab->available++;
230,8 → 233,10
int i;
count_t frames = 0;
 
for (i=0;i < mag->busy; i++)
for (i=0;i < mag->busy; i++) {
frames += slab_obj_destroy(cache, mag->objs[i], NULL);
atomic_dec(&cache->cached_objs);
}
slab_free(&mag_cache, mag);
 
246,6 → 251,7
static void * magazine_obj_get(slab_cache_t *cache)
{
slab_magazine_t *mag;
void *obj;
 
spinlock_lock(&cache->mag_cache[CPU->id].lock);
 
279,8 → 285,11
spinlock_unlock(&cache->lock);
}
gotit:
obj = mag->objs[--mag->busy];
spinlock_unlock(&cache->mag_cache[CPU->id].lock);
return mag->objs[--mag->busy];
atomic_dec(&cache->cached_objs);
return obj;
out:
spinlock_unlock(&cache->mag_cache[CPU->id].lock);
return NULL;
337,6 → 346,7
mag->objs[mag->busy++] = obj;
 
spinlock_unlock(&cache->mag_cache[CPU->id].lock);
atomic_inc(&cache->cached_objs);
return 0;
errout:
spinlock_unlock(&cache->mag_cache[CPU->id].lock);
467,7 → 477,6
if (flags & SLAB_RECLAIM_ALL) {
/* Aggressive memfree */
 
/* Destroy CPU magazines */
for (i=0; i<config.cpu_count; i++) {
mag = cache->mag_cache[i].current;
483,11 → 492,13
}
/* Destroy full magazines */
cur=cache->magazines.prev;
 
while (cur!=&cache->magazines) {
mag = list_get_instance(cur, slab_magazine_t, link);
cur = cur->prev;
list_remove(cur->next);
// list_remove(&mag->link);
frames += magazine_destroy(cache,mag);
/* If we do not do full reclaim, break
* as soon as something is freed */
596,12 → 607,13
link_t *cur;
 
spinlock_lock(&slab_cache_lock);
printf("SLAB name\tOsize\tPages\tOcnt\tSlabs\tAllocobjs\tCtl\n");
printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
cache = list_get_instance(cur, slab_cache_t, link);
printf("%s\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
(1 << cache->order), cache->objects,
atomic_get(&cache->allocated_slabs),
atomic_get(&cache->allocated_slabs),
atomic_get(&cache->cached_objs),
atomic_get(&cache->allocated_objs),
cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
}