/kernel/trunk/generic/include/bitops.h |
---|
0,0 → 1,66 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#ifndef _BITOPS_H_ |
#define _BITOPS_H_ |
#include <typedefs.h> |
/** Return position of first non-zero bit from left. |
* |
* If number is zero, it returns 0 |
*/ |
static inline int fnzb32(__u32 arg) |
{ |
int n = 0; |
if (arg & 0xffff0000) { arg >>= 16;n += 16;} |
if (arg & 0xff00) { arg >>= 8; n += 8;} |
if (arg & 0xf0) { arg >>= 4; n += 4;} |
if (arg & 0xc) { arg >>= 2; n+=2;} |
if (arg & 0x2) { arg >>= 1; n+=1;} |
return n; |
} |
/** Return position of the first non-zero bit from left in a 64-bit value.
 *
 * @param arg Value to scan.
 * @return Bit index (0..63) of the most significant set bit; 0 when arg is 0.
 */
static inline int fnzb64(__u64 arg)
{
	int pos = 0;
	/* The upper-half mask is assembled at runtime because mips
	 * complains about 64-bit literals; other platforms are expected
	 * to constant-fold this. */
	__u64 upper_half = 0xffffffff;

	upper_half <<= 32;
	if (arg & upper_half) {
		arg >>= 32;
		pos = 32;
	}

	return pos + fnzb32((__u32) arg);
}
#define fnzb(x) fnzb32(x) |
#endif |
/kernel/trunk/generic/include/mm/heap.h |
---|
31,9 → 31,12 |
#include <arch/types.h> |
#include <typedefs.h> |
#include <mm/slab.h> |
#define malloc(size) early_malloc(size) |
#define free(ptr) early_free(ptr) |
//#define malloc(size) early_malloc(size) |
//#define free(ptr) early_free(ptr) |
#define malloc(size) kalloc(size,0) |
#define free(ptr) kfree(ptr) |
struct chunk { |
int used; |
/kernel/trunk/generic/include/mm/slab.h |
---|
33,6 → 33,12 |
#include <synch/spinlock.h> |
#include <arch/atomic.h> |
/** Minimum size to be allocated by malloc */ |
#define SLAB_MIN_MALLOC_W 3 |
/** Maximum size to be allocated by malloc */ |
#define SLAB_MAX_MALLOC_W 17 |
/** Initial Magazine size (TODO: dynamically growing magazines) */ |
#define SLAB_MAG_SIZE 4 |
40,7 → 46,7 |
#define SLAB_INSIDE_SIZE (PAGE_SIZE >> 3) |
/** Maximum wasted space we allow for cache */ |
#define SLAB_MAX_BADNESS(cache) ((PAGE_SIZE << (cache)->order >> 2)) |
#define SLAB_MAX_BADNESS(cache) ((PAGE_SIZE << (cache)->order) >> 2) |
/* slab_reclaim constants */ |
#define SLAB_RECLAIM_ALL 0x1 /**< Reclaim all possible memory, because |
109,4 → 115,8 |
/* KConsole debug */ |
extern void slab_print_list(void); |
/* Malloc support */ |
extern void * kalloc(unsigned int size, int flags); |
extern void kfree(void *obj); |
#endif |
/kernel/trunk/generic/src/mm/slab.c |
---|
92,6 → 92,7 |
#include <arch.h> |
#include <panic.h> |
#include <debug.h> |
#include <bitops.h> |
SPINLOCK_INITIALIZE(slab_cache_lock); |
static LIST_INITIALIZE(slab_cache_list); |
108,6 → 109,14 |
* their caches do not require further allocation |
*/ |
static slab_cache_t *slab_extern_cache; |
/** Caches for malloc */ |
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1]; |
/* Human-readable names for the power-of-two malloc caches; entry i
 * corresponds to object size 1 << (SLAB_MIN_MALLOC_W + i), i.e. the
 * 15 sizes from 8 bytes (2^3) up to 128K (2^17). */
char *malloc_names[] = {
	"malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K"
};
/** Slab descriptor */ |
typedef struct { |
478,6 → 487,7 |
int flags) |
{ |
int i; |
int pages; |
memsetb((__address)cache, sizeof(*cache), 0); |
cache->name = name; |
507,7 → 517,8 |
cache->flags |= SLAB_CACHE_SLINSIDE; |
/* Minimum slab order */ |
cache->order = (cache->size-1) >> PAGE_WIDTH; |
pages = ((cache->size-1) >> PAGE_WIDTH) + 1; |
cache->order = fnzb(pages); |
while (badness(cache) > SLAB_MAX_BADNESS(cache)) { |
cache->order += 1; |
633,7 → 644,7 |
/* Disable interrupts to avoid deadlocks with interrupt handlers */ |
ipl = interrupts_disable(); |
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) |
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE) && CPU) |
result = magazine_obj_get(cache); |
if (!result) { |
650,8 → 661,8 |
return result; |
} |
/** Return object to cache */ |
void slab_free(slab_cache_t *cache, void *obj) |
/** Return object to cache, use slab if known */ |
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab) |
{ |
ipl_t ipl; |
658,10 → 669,11 |
ipl = interrupts_disable(); |
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \ |
|| !CPU \ |
|| magazine_obj_put(cache, obj)) { |
spinlock_lock(&cache->lock); |
slab_obj_destroy(cache, obj, NULL); |
slab_obj_destroy(cache, obj, slab); |
spinlock_unlock(&cache->lock); |
} |
interrupts_restore(ipl); |
668,6 → 680,12 |
atomic_dec(&cache->allocated_objs); |
} |
/** Return slab object to cache.
 *
 * Public entry point: the containing slab is not known here, so NULL
 * is passed and _slab_free() resolves it only if the object bypasses
 * the per-CPU magazines.
 */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache,obj,NULL);
}
/* Go through all caches and reclaim what is possible */ |
count_t slab_reclaim(int flags) |
{ |
710,6 → 728,8 |
void slab_cache_init(void) |
{ |
int i, size; |
/* Initialize magazine cache */ |
_slab_cache_create(&mag_cache, |
"slab_magazine", |
731,4 → 751,35 |
SLAB_CACHE_SLINSIDE); |
/* Initialize structures for malloc */ |
for (i=0, size=(1<<SLAB_MIN_MALLOC_W); |
i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1); |
i++, size <<= 1) { |
malloc_caches[i] = slab_cache_create(malloc_names[i], |
size, 0, |
NULL,NULL,0); |
} |
} |
/**************************************/ |
/* kalloc/kfree functions */ |
void * kalloc(unsigned int size, int flags) |
{ |
int idx; |
ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W)); |
if (size < (1 << SLAB_MIN_MALLOC_W)) |
size = (1 << SLAB_MIN_MALLOC_W); |
idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1; |
return slab_alloc(malloc_caches[idx], flags); |
} |
/** Free memory allocated by kalloc().
 *
 * Resolves the containing slab from the object's address via
 * obj2slab(), then returns the object to its owning cache through
 * _slab_free(), passing the slab so it need not be looked up again.
 */
void kfree(void *obj)
{
	slab_t *slab = obj2slab(obj);

	_slab_free(slab->cache, obj, slab);
}
/kernel/trunk/generic/src/mm/as.c |
---|
78,7 → 78,7 |
{ |
as_t *as; |
as = (as_t *) malloc(sizeof(as_t)); |
as = (as_t *) early_malloc(sizeof(as_t)); |
if (as) { |
list_initialize(&as->as_with_asid_link); |
spinlock_initialize(&as->lock, "as_lock"); |