Subversion Repositories: HelenOS-historic

Compare Revisions: Rev 771 → Rev 770

/kernel/trunk/generic/include/bitops.h
File deleted
/kernel/trunk/generic/include/mm/slab.h
33,12 → 33,6
#include <synch/spinlock.h>
#include <arch/atomic.h>
 
/** Minimum size to be allocated by malloc */
#define SLAB_MIN_MALLOC_W 3
 
/** Maximum size to be allocated by malloc */
#define SLAB_MAX_MALLOC_W 17
 
/** Initial Magazine size (TODO: dynamically growing magazines) */
#define SLAB_MAG_SIZE 4
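
The _W suffix marks a log2 width: these bounds give malloc size classes from 1 << 3 = 8 bytes up to 1 << 17 = 128 KiB, one slab cache per power of two, matching the fifteen malloc_names entries added in slab.c below. A standalone check of the arithmetic (ordinary user-space C, not kernel code):

    #include <stdio.h>

    #define SLAB_MIN_MALLOC_W 3
    #define SLAB_MAX_MALLOC_W 17

    int main(void)
    {
        /* One slab cache per power-of-two size class. */
        int classes = SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1;
        printf("%d classes, %d .. %d bytes\n", classes,
            1 << SLAB_MIN_MALLOC_W, 1 << SLAB_MAX_MALLOC_W);
        /* prints: 15 classes, 8 .. 131072 bytes */
        return 0;
    }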
 
46,7 → 40,7
#define SLAB_INSIDE_SIZE (PAGE_SIZE >> 3)
 
/** Maximum wasted space we allow for cache */
#define SLAB_MAX_BADNESS(cache) ((PAGE_SIZE << (cache)->order) >> 2)
#define SLAB_MAX_BADNESS(cache) ((PAGE_SIZE << (cache)->order >> 2))
 
/* slab_reclaim constants */
#define SLAB_RECLAIM_ALL 0x1 /**< Reclaim all possible memory, because
115,8 → 109,4
/* KConsole debug */
extern void slab_print_list(void);
 
/* Malloc support */
extern void * kalloc(unsigned int size, int flags);
extern void kfree(void *obj);
 
#endif
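
One note on the SLAB_MAX_BADNESS hunk above: << and >> have equal precedence and associate left to right in C, so both revisions compute the same value, a quarter of the slab's total size; rev 771 only adds parentheses that make the grouping explicit. A standalone check:

    #include <assert.h>

    int main(void)
    {
        unsigned long page_size = 4096;  /* illustrative 4 KiB PAGE_SIZE */
        unsigned int order = 2;

        /* a << b >> c parses as (a << b) >> c */
        assert((page_size << order >> 2) == ((page_size << order) >> 2));
        return 0;
    }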
/kernel/trunk/generic/include/mm/heap.h
31,12 → 31,9
 
#include <arch/types.h>
#include <typedefs.h>
#include <mm/slab.h>
 
//#define malloc(size) early_malloc(size)
//#define free(ptr) early_free(ptr)
#define malloc(size) kalloc(size,0)
#define free(ptr) kfree(ptr)
#define malloc(size) early_malloc(size)
#define free(ptr) early_free(ptr)
 
struct chunk {
    int used;
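
With rev 771's heap.h, kernel malloc()/free() route into the slab allocator instead of the boot-time heap. A sketch of the preprocessor's view (the 0 passed by the macro means no special allocation flags):

    /* Under rev 771 a call such as */
    ptr = malloc(sizeof(as_t));
    /* expands to a slab allocation: */
    ptr = kalloc(sizeof(as_t), 0);
    /* whereas rev 770 still expanded it to the boot-time heap: */
    ptr = early_malloc(sizeof(as_t));

The as.c hunk at the end of this diff makes the early_malloc() call explicit for address-space creation, presumably because that path can run before slab_cache_init().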
/kernel/trunk/generic/src/mm/slab.c
92,7 → 92,6
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
 
SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);
109,14 → 108,6
 * their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] = {
"malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
"malloc-256","malloc-512","malloc-1K","malloc-2K",
"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
"malloc-64K","malloc-128K"
};
 
/** Slab descriptor */
typedef struct {
487,7 → 478,6
        int flags)
{
    int i;
    int pages;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;
517,8 → 507,7
    cache->flags |= SLAB_CACHE_SLINSIDE;
 
    /* Minimum slab order */
    pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
    cache->order = fnzb(pages);
    cache->order = (cache->size-1) >> PAGE_WIDTH;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
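
The order computation is easiest to check with numbers, assuming PAGE_WIDTH == 12 (4 KiB pages, an illustrative value) and fnzb() returning the index of the most significant set bit:

    /* Worked example for cache->size = 20000 bytes: */
    pages = ((20000 - 1) >> 12) + 1;   /* = 5 pages needed            */
    order = fnzb(pages);               /* fnzb(5) = 2 -> 4-page slab  */
    /* whereas rev 770 stored a page count in the exponent field:     */
    order = (20000 - 1) >> 12;         /* = 4 -> a 16-page slab       */
    /* The old formula confused a page count with its logarithm; the  */
    /* badness(cache) loop above then grows rev 771's order whenever  */
    /* too much of the slab would be wasted.                          */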
643,8 → 632,8
 
    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE) && CPU)
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);

    if (!result) {
661,8 → 650,8
    return result;
}
 
/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
/** Return object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    ipl_t ipl;
 
669,11 → 658,10
    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
        || !CPU \
        || magazine_obj_put(cache, obj)) {
        spinlock_lock(&cache->lock);
        slab_obj_destroy(cache, obj, slab);
        slab_obj_destroy(cache, obj, NULL);
        spinlock_unlock(&cache->lock);
    }
    interrupts_restore(ipl);
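
Both conditionals changed in the two hunks above gain a CPU test in rev 771: the magazine layer keeps per-processor object caches, so it may only be used once the current processor's CPU structure exists; early-boot allocations now fall straight through to the slab lists. A condensed sketch of the guard pattern (not a verbatim excerpt):

    /* Allocation fast path: touch the per-CPU magazine only when CPU is set. */
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE) && CPU)
        result = magazine_obj_get(cache);

    /* Free path: no magazines, no CPU yet, or magazine full --
       return the object to its slab under the cache lock instead. */
    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) || !CPU
        || magazine_obj_put(cache, obj))
        slab_obj_destroy(cache, obj, slab);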
680,12 → 668,6
    atomic_dec(&cache->allocated_objs);
}
 
/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}
 
/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
728,8 → 710,6
 
void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
            "slab_magazine",
751,35 → 731,4
            SLAB_CACHE_SLINSIDE);
 
    /* Initialize structures for malloc */
    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                size, 0,
                NULL, NULL, 0);
    }
}
 
/**************************************/
/* kalloc/kfree functions */
void * kalloc(unsigned int size, int flags)
{
    int idx;

    ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));
    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}
 
 
void kfree(void *obj)
{
    slab_t *slab = obj2slab(obj);
    _slab_free(slab->cache, obj, slab);
}
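
kalloc() rounds each request up to the nearest power-of-two cache: fnzb(size - 1) + 1 is the width of the smallest power of two that is >= size, assuming fnzb() from the new bitops.h returns the index of the most significant set bit. A worked sketch using idx_for(), a hypothetical rewrite of the index math:

    unsigned int idx_for(unsigned int size)
    {
        /* Requests below the smallest class are rounded up to it. */
        if (size < (1 << SLAB_MIN_MALLOC_W))
            size = (1 << SLAB_MIN_MALLOC_W);
        return fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
    }
    /* idx_for(100): 99 = 0b1100011, fnzb(99) = 6, 6 - 3 + 1 = 4, */
    /* so malloc_caches[4] is "malloc-128", the smallest class    */
    /* that fits 100 bytes.                                       */
    /* idx_for(64): fnzb(63) = 5, 5 - 3 + 1 = 3 -> "malloc-64",   */
    /* so exact powers of two are not rounded up.                 */

kfree() goes the other way: obj2slab() recovers the owning slab from the object's address, letting _slab_free() skip the slab lookup that a plain slab_free(cache, obj) would otherwise perform.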
/kernel/trunk/generic/src/mm/as.c
78,7 → 78,7
{
    as_t *as;

    as = (as_t *) early_malloc(sizeof(as_t));
    as = (as_t *) malloc(sizeof(as_t));
    if (as) {
        list_initialize(&as->as_with_asid_link);
        spinlock_initialize(&as->lock, "as_lock");