Subversion Repositories HelenOS-historic

Compare Revisions

Rev 788 → Rev 789

/kernel/trunk/generic/include/mm/slab.h
55,6 → 55,8
/* cache_create flags */
#define SLAB_CACHE_NOMAGAZINE 0x1 /**< Do not use per-cpu cache */
#define SLAB_CACHE_SLINSIDE 0x2 /**< Have control structure inside SLAB */
/** The magazine cache is added later, when this flag is set */
#define SLAB_CACHE_MAGDEFERRED (0x4 | SLAB_CACHE_NOMAGAZINE)
 
typedef struct {
link_t link;
64,6 → 66,13
}slab_magazine_t;
 
typedef struct {
slab_magazine_t *current;
slab_magazine_t *last;
SPINLOCK_DECLARE(lock);
}slab_mag_cache_t;
 
 
typedef struct {
char *name;
 
link_t link;
92,11 → 101,7
SPINLOCK_DECLARE(maglock);
 
/** CPU cache */
struct {
slab_magazine_t *current;
slab_magazine_t *last;
SPINLOCK_DECLARE(lock);
}mag_cache[0];
slab_mag_cache_t *mag_cache;
}slab_cache_t;
 
extern slab_cache_t * slab_cache_create(char *name,
113,6 → 118,7
 
/** Initialize SLAB subsystem */
extern void slab_cache_init(void);
extern void slab_enable_cpucache(void);
 
/* KConsole debug */
extern void slab_print_list(void);
120,5 → 126,4
/* Malloc support */
extern void * kalloc(unsigned int size, int flags);
extern void kfree(void *obj);
 
#endif
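In summary, this header change replaces the zero-length mag_cache[0] array that used to trail slab_cache_t with a separately allocated slab_mag_cache_t pointer, introduces SLAB_CACHE_MAGDEFERRED, and exports slab_enable_cpucache(). A minimal usage sketch, with a hypothetical cache name and object size (the call sequence mirrors what the .c changes below do):

/* Hypothetical early-boot caller: the cache is created before the
 * number of processors is known, so its magazines are deferred. */
slab_cache_t *foo_cache = slab_cache_create("foo_cache", 64, 0,
                                            NULL, NULL,
                                            SLAB_CACHE_MAGDEFERRED);
/* ... later, once smp_init() has fixed config.cpu_count ... */
slab_enable_cpucache(); /* allocates foo_cache->mag_cache, clears the flag */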
/kernel/trunk/generic/include/mm/as.h
49,7 → 49,6
#define UDATA_ADDRESS UDATA_ADDRESS_ARCH
 
#define FLAG_AS_KERNEL (1 << 0) /**< Kernel address space. */
#define FLAG_AS_EARLYMALLOC (1 << 1) /**< Use early malloc */
 
enum as_area_type {
AS_AREA_TEXT = 1, AS_AREA_DATA, AS_AREA_STACK
/kernel/trunk/generic/src/main/main.c
160,6 → 160,7
arch_pre_mm_init();
early_heap_init(config.heap_addr, config.heap_size + config.heap_delta);
frame_init();
slab_cache_init();
as_init();
page_init();
tlb_init();
173,7 → 174,7
arch_pre_smp_init();
smp_init();
/* Per-CPU slab magazines can be enabled only AFTER we know the number of processors */
slab_cache_init();
slab_enable_cpucache();
 
printf("config.memory_size=%dM\n", config.memory_size/(1024*1024));
printf("config.cpu_count=%d\n", config.cpu_count);
/kernel/trunk/generic/src/mm/slab.c
112,7 → 112,8
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
 
/** Cache for the per-CPU mag_cache array of slab_cache_t */
static slab_cache_t *cpu_cache = NULL;
/** Cache for external slab descriptors
* This time we want per-cpu cache, so do not make it static
* - using SLAB for internal SLAB structures will not deadlock,
234,12 → 235,12
slab = obj2slab(obj);
 
ASSERT(slab->cache == cache);
ASSERT(slab->available < cache->objects);
 
if (cache->destructor)
freed = cache->destructor(obj);
spinlock_lock(&cache->slablock);
ASSERT(slab->available < cache->objects);
 
*((int *)obj) = slab->nextavail;
slab->nextavail = (obj - slab->start)/cache->size;
536,6 → 537,23
return ssize - objects*cache->size;
}
 
/**
* Allocate and initialize the per-CPU mag_cache array of a slab cache
*/
static void make_magcache(slab_cache_t *cache)
{
int i;
 
ASSERT(cpu_cache);
cache->mag_cache = slab_alloc(cpu_cache, 0);
for (i=0; i < config.cpu_count; i++) {
memsetb((__address)&cache->mag_cache[i],
sizeof(cache->mag_cache[i]), 0);
spinlock_initialize(&cache->mag_cache[i].lock,
"slab_maglock_cpu");
}
}
 
/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
546,7 → 564,6
int (*destructor)(void *obj),
int flags)
{
int i;
int pages;
ipl_t ipl;
 
568,14 → 585,8
list_initialize(&cache->magazines);
spinlock_initialize(&cache->slablock, "slab_lock");
spinlock_initialize(&cache->maglock, "slab_maglock");
if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
for (i=0; i < config.cpu_count; i++) {
memsetb((__address)&cache->mag_cache[i],
sizeof(cache->mag_cache[i]), 0);
spinlock_initialize(&cache->mag_cache[i].lock,
"slab_maglock_cpu");
}
}
if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
make_magcache(cache);
 
/* Compute slab sizes, object counts in slabs etc. */
if (cache->size < SLAB_INSIDE_SIZE)
696,6 → 707,8
|| !list_empty(&cache->partial_slabs))
panic("Destroying cache that is not empty.");
 
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
slab_free(cpu_cache, cache->mag_cache);
slab_free(&slab_cache_cache, cache);
}
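Because the per-CPU magazine array is now allocated separately from the cache descriptor, slab_cache_destroy() has to release it explicitly from cpu_cache before freeing the descriptor itself; correspondingly, the slab_cache_cache object size below no longer reserves config.cpu_count trailing mag_cache entries.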
 
810,7 → 823,7
/* Initialize slab_cache cache */
_slab_cache_create(&slab_cache_cache,
"slab_cache",
sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
sizeof(slab_cache_cache),
sizeof(__address),
NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
818,7 → 831,7
slab_extern_cache = slab_cache_create("slab_extern",
sizeof(slab_t),
0, NULL, NULL,
SLAB_CACHE_SLINSIDE);
SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
/* Initialize structures for malloc */
for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
826,7 → 839,7
i++, size <<= 1) {
malloc_caches[i] = slab_cache_create(malloc_names[i],
size, 0,
NULL,NULL,0);
NULL,NULL, SLAB_CACHE_MAGDEFERRED);
}
#ifdef CONFIG_DEBUG
_slab_initialized = 1;
833,6 → 846,35
#endif
}
 
/** Enable cpu_cache (per-CPU magazine caches)
*
* The kernel calls this function once it knows the real number of
* processors.
* Allocate the slab backing the per-CPU magazine arrays and enable
* magazines on all existing caches marked SLAB_CACHE_MAGDEFERRED.
*/
void slab_enable_cpucache(void)
{
link_t *cur;
slab_cache_t *s;
 
cpu_cache = slab_cache_create("magcpucache",
sizeof(slab_mag_cache_t) * config.cpu_count,
0, NULL, NULL,
SLAB_CACHE_NOMAGAZINE);
spinlock_lock(&slab_cache_lock);
for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
s = list_get_instance(cur, slab_cache_t, link);
if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
continue;
make_magcache(s);
s->flags &= ~SLAB_CACHE_MAGDEFERRED;
}
 
spinlock_unlock(&slab_cache_lock);
}
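The loop's test relies on SLAB_CACHE_MAGDEFERRED being defined as (0x4 | SLAB_CACHE_NOMAGAZINE): a cache created with plain SLAB_CACHE_NOMAGAZINE matches only one of the two bits and is skipped, while a deferred cache matches both and gets its per-CPU array built here. A short worked illustration, using the flag values from slab.h above:

/* SLAB_CACHE_NOMAGAZINE == 0x1, SLAB_CACHE_MAGDEFERRED == 0x4 | 0x1 == 0x5 */
flags = SLAB_CACHE_NOMAGAZINE;    /* 0x1: (0x1 & 0x5) == 0x1 != 0x5 -> skipped */
flags = SLAB_CACHE_MAGDEFERRED;   /* 0x5: (0x5 & 0x5) == 0x5 -> make_magcache() */
flags &= ~SLAB_CACHE_MAGDEFERRED; /* clears 0x4 and 0x1 -> normal magazine cache */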
 
/**************************************/
/* kalloc/kfree functions */
void * kalloc(unsigned int size, int flags)
/kernel/trunk/generic/src/mm/as.c
65,7 → 65,7
void as_init(void)
{
as_arch_init();
AS_KERNEL = as_create(FLAG_AS_KERNEL | FLAG_AS_EARLYMALLOC);
AS_KERNEL = as_create(FLAG_AS_KERNEL);
if (!AS_KERNEL)
panic("can't create kernel address space\n");
}
78,10 → 78,7
{
as_t *as;
 
if (flags & FLAG_AS_EARLYMALLOC)
as = (as_t *) early_malloc(sizeof(as_t));
else
as = (as_t *) malloc(sizeof(as_t));
as = (as_t *) malloc(sizeof(as_t));
if (as) {
list_initialize(&as->as_with_asid_link);
spinlock_initialize(&as->lock, "as_lock");
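as_create() now always allocates the as_t with malloc(); the FLAG_AS_EARLYMALLOC special case, which used early_malloc() for the kernel address space, is removed, presumably because the main.c reordering above makes the regular allocator available by the time as_init() runs.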