32,10 → 32,12
#include <memstr.h> |
#include <align.h> |
#include <mm/heap.h> |
#include <mm/frame.h> |
#include <config.h> |
#include <print.h> |
#include <arch.h> |
#include <panic.h> |
#include <debug.h> |
|
SPINLOCK_INITIALIZE(slab_cache_lock); |
LIST_INITIALIZE(slab_cache_list); |
42,29 → 44,170
|
slab_cache_t mag_cache; |
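/* Note: mag_cache above backs the slab_magazine_t structures themselves;
* the magazine layer below allocates from it via slab_alloc(&mag_cache, ...). */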
|
|
typedef struct { |
slab_cache_t *cache; /**< Pointer to parent cache */ |
link_t link; /**< List of full/partial slabs */
void *start; /**< Start address of first available item */ |
count_t available; /**< Count of available items in this slab */ |
index_t nextavail; /**< The index of next available item */ |
} slab_t;
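
/*
* Free objects inside a slab form an index chain: the first word of each
* free object holds the index of the next free one, and nextavail points
* at the head of the chain. With SLAB_CACHE_SLINSIDE the slab_t itself
* sits at the end of the slab's frame block; otherwise it is malloc()ed
* separately (see slab_space_alloc() below).
*/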
|
/**************************************/ |
/* SLAB allocation functions */ |
|
/** |
* Allocate frames for slab space and initialize |
* |
* TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!! |
*/ |
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags) |
{ |
void *data; |
slab_t *slab; |
size_t fsize; |
int i; |
zone_t *zone = NULL; |
int status; |
|
data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone); |
if (status != FRAME_OK) |
return NULL; |
|
if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
slab = malloc(sizeof(*slab)); // , flags); |
if (!slab) { |
frame_free((__address)data); |
return NULL; |
} |
} else { |
fsize = (PAGE_SIZE << cache->order); |
slab = data + fsize - sizeof(*slab); |
} |
|
/* Fill in slab structures */ |
/* TODO: some better way of accessing the frame, although |
* the optimizer might optimize the division out :-/ */ |
for (i=0; i< (1<<cache->order); i++) { |
ADDR2FRAME(zone, (__address)(data+i*PAGE_SIZE))->parent = slab; |
} |
|
slab->start = data; |
slab->available = cache->objects; |
slab->nextavail = 0; |
|
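/* Chain the free objects: object i's first word holds index i+1 */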
for (i=0; i<cache->objects;i++) |
*((int *) (slab->start + i*cache->size)) = i+1; |
return slab; |
} |
|
/** |
* Free space associated with SLAB |
* |
* @return number of freed frames |
*/ |
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab) |
{ |
frame_free((__address)slab->start); |
if (!(cache->flags & SLAB_CACHE_SLINSIDE))
free(slab); |
return 1 << cache->order; |
} |
|
/** Map object to slab structure */ |
static slab_t * obj2slab(void *obj) |
{ |
frame_t *frame; |
|
frame = frame_addr2frame((__address)obj); |
return (slab_t *)frame->parent; |
} |
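/* The lookup above relies on slab_space_alloc() having stored the owning
* slab_t in the parent field of every frame backing the slab. */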
|
/**************************************/ |
/* SLAB functions */ |
|
|
/** |
* Return object to slab and call a destructor |
* |
* Assume cache->lock is held.
* |
* @param slab Slab of the object, if known by the caller; otherwise NULL
* |
* @return Number of freed pages |
*/ |
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, |
slab_t *slab) |
{ |
count_t frames = 0; |
|
if (!slab) |
slab = obj2slab(obj); |
|
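/* Push the object onto the slab's free-index chain */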
*((int *)obj) = slab->nextavail; |
slab->nextavail = (obj - slab->start)/cache->size; |
slab->available++; |
|
/* Move it to correct list */ |
if (slab->available == 1) { |
/* It was in full, move to partial */ |
list_remove(&slab->link); |
list_prepend(&cache->partial_slabs, &slab->link); |
} |
if (slab->available == cache->objects) { |
/* Free associated memory */ |
list_remove(&slab->link); |
/* Avoid deadlock */ |
spinlock_unlock(&cache->lock); |
frames = slab_space_free(cache, slab); |
spinlock_lock(&cache->lock); |
} |
|
return frames; |
} |
|
|
/** |
* Take a new object from a slab, creating a new slab if needed
* |
* Assume cache->lock is held. |
* |
* @return Object address or NULL
*/ |
static void * slab_obj_create(slab_cache_t *cache, int flags) |
{ |
slab_t *slab; |
void *obj; |
|
if (list_empty(&cache->partial_slabs)) { |
/* Allow recursion and reclaiming
* - this should work, as the SLAB control structures
* are small and do not need to allocate with anything
* other than frame_alloc when they are allocating,
* so we should get recursion at most 1 level deep
*/
spinlock_unlock(&cache->lock); |
slab = slab_space_alloc(cache, flags); |
spinlock_lock(&cache->lock); |
if (!slab) |
return NULL; |
} else { |
slab = list_get_instance(cache->partial_slabs.next, |
slab_t, |
link); |
list_remove(&slab->link); |
} |
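/* Pop the head of the free chain: nextavail indexes the object, whose
* first word holds the index of the following free object */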
obj = slab->start + slab->nextavail * cache->size; |
slab->nextavail = *((int *)obj); |
slab->available--; |
if (! slab->available) |
list_prepend(&cache->full_slabs, &slab->link); |
else |
list_prepend(&cache->partial_slabs, &slab->link); |
return obj; |
} |
|
/**************************************/ |
73,7 → 216,7
/** |
* Free all objects in magazine and free memory associated with magazine |
* |
* Assume mag_cache[cpu].lock is locked |
* |
* @return Number of freed pages |
*/ |
84,7 → 227,7
count_t frames = 0; |
|
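/* Hand every cached object back to its home slab */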
for (i=0;i < mag->busy; i++) |
frames += slab_obj_destroy(cache, mag->objs[i], NULL); |
|
slab_free(&mag_cache, mag); |
|
115,7 → 258,7
mag = cache->mag_cache[CPU->id].current; |
goto gotit; |
} |
/* If still not busy, exchange current with some from |
* other full magazines */ |
spinlock_lock(&cache->lock); |
if (list_empty(&cache->magazines)) { |
161,7 → 304,7
/* We do not want to sleep just because of caching */ |
/* Especially we do not want reclaiming to start, as |
* this would deadlock */ |
mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM); |
if (!mag) /* Allocation failed, give up on caching */ |
goto errout; |
|
175,7 → 318,7
if (mag) |
list_prepend(&cache->magazines, &mag->link); |
|
mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM); |
if (!mag) |
goto errout; |
|
198,8 → 341,30
|
|
/**************************************/ |
/* SLAB CACHE functions */ |
|
/** Return number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache) |
{ |
if (cache->flags & SLAB_CACHE_SLINSIDE) |
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size; |
else |
return (PAGE_SIZE << cache->order) / cache->size; |
} |
|
/** Return wasted space in slab */ |
static int badness(slab_cache_t *cache) |
{ |
int objects; |
int ssize; |
|
objects = comp_objects(cache); |
ssize = PAGE_SIZE << cache->order; |
if (cache->flags & SLAB_CACHE_SLINSIDE) |
ssize -= sizeof(slab_t); |
return ssize - objects*cache->size; |
} |
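
/*
* Example, assuming PAGE_SIZE == 4096, order 0 and SLAB_CACHE_SLINSIDE
* unset: a cache with size == 254 gives comp_objects() == 16 and
* badness() == 4096 - 16*254 == 32 wasted bytes.
*/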
|
/** Initialize allocated memory as a slab cache */ |
static void |
_slab_cache_create(slab_cache_t *cache, |
214,9 → 379,10
|
memsetb((__address)cache, sizeof(*cache), 0); |
cache->name = name; |
cache->align = align; |
|
if (align) |
size = ALIGN_UP(size, align); |
cache->size = size; |
|
cache->constructor = constructor; |
cache->destructor = destructor; |
236,8 → 402,15
if (cache->size < SLAB_INSIDE_SIZE) |
cache->flags |= SLAB_CACHE_SLINSIDE; |
|
|
/* Minimum slab order */ |
cache->order = (cache->size / PAGE_SIZE) + 1; |
|
while (badness(cache) > SLAB_MAX_BADNESS(cache)) { |
cache->order += 1; |
} |
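/* E.g., assuming 4 KiB pages, a 5000-byte object starts at order 2
* (four pages per slab); the loop above grows the order while badness()
* exceeds SLAB_MAX_BADNESS() */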
|
cache->objects = comp_objects(cache); |
|
spinlock_lock(&slab_cache_lock); |
|
list_append(&cache->link, &slab_cache_list); |
266,6 → 439,8
* |
* @param flags If SLAB_RECLAIM_ALL is set, do aggressive freeing
* @return Number of freed pages |
* |
* TODO: Add light reclaim |
*/ |
static count_t _slab_reclaim(slab_cache_t *cache, int flags) |
{ |
283,6 → 458,8
spinlock_lock(&cache->lock); |
|
if (flags & SLAB_RECLAIM_ALL) { |
/* Aggressive memfree */ |
|
/* Destroy CPU magazines */ |
for (i=0; i<config.cpu_count; i++) { |
mag = cache->mag_cache[i].current; |
295,16 → 472,20
frames += magazine_destroy(cache, mag); |
cache->mag_cache[i].last = NULL; |
} |
} |
/* Destroy full magazines */ |
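/* Walk from the list tail: magazines are prepended, so the tail holds
* the least recently added (coldest) ones */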
cur=cache->magazines.prev; |
while (cur!=&cache->magazines) { |
mag = list_get_instance(cur, slab_magazine_t, link); |
|
cur = cur->prev; |
list_remove(cur->next); |
frames += magazine_destroy(cache,mag); |
/* If we do not do full reclaim, break |
* as soon as something is freed */ |
if (!(flags & SLAB_RECLAIM_ALL) && frames) |
break; |
} |
|
spinlock_unlock(&cache->lock); |
for (i=0; i < config.cpu_count; i++) |
347,8 → 528,11
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
result = magazine_obj_get(cache); |
|
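/* Magazine layer disabled or empty - fall back to taking an object
* straight from a slab under cache->lock */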
if (!result) { |
spinlock_lock(&cache->lock); |
result = slab_obj_create(cache, flags); |
spinlock_unlock(&cache->lock); |
} |
|
interrupts_restore(ipl); |
|
362,11 → 546,12
|
ipl = interrupts_disable(); |
|
if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
|| magazine_obj_put(cache, obj)) { |
|
spinlock_lock(&cache->lock); |
slab_obj_destroy(cache, obj, NULL); |
spinlock_unlock(&cache->lock); |
} |
interrupts_restore(ipl); |
} |
398,10 → 583,10
link_t *cur; |
|
spinlock_lock(&slab_cache_lock); |
printf("SLAB name\tObj size\n"); |
printf("SLAB name\tOsize\tOrder\n"); |
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) { |
cache = list_get_instance(cur, slab_cache_t, link); |
printf("%s\t%d\n", cache->name, cache->size); |
printf("%s\t%d\t%d\n", cache->name, cache->size, cache->order); |
} |
spinlock_unlock(&slab_cache_lock); |
} |