Subversion Repositories HelenOS

Compare Revisions

Rev 3190 → Rev 3191

/branches/dynload/kernel/test/avltree/avltree1.c
48,7 → 48,8
 
static int test_tree_balance(avltree_node_t *node);
static avltree_node_t *test_tree_parents(avltree_node_t *node);
static void print_tree_structure_flat (avltree_node_t *node, int level);
static void print_tree_structure_flat (avltree_node_t *node, int level)
__attribute__ ((used));
static avltree_node_t *alloc_avltree_node(void);
 
static avltree_node_t *test_tree_parents(avltree_node_t *node)
61,14 → 62,15
if (node->lft) {
tmp = test_tree_parents(node->lft);
if (tmp != node) {
printf("Bad parent pointer key: %" PRIu64 ", address: %p\n",
tmp->key, node->lft);
printf("Bad parent pointer key: %" PRIu64
", address: %p\n", tmp->key, node->lft);
}
}
if (node->rgt) {
tmp = test_tree_parents(node->rgt);
if (tmp != node) {
printf("Bad parent pointer key: %" PRIu64 ", address: %p\n",
printf("Bad parent pointer key: %" PRIu64
", address: %p\n",
tmp->key,node->rgt);
}
}
94,7 → 96,8
* Prints the structure of the node, which is level levels from the top of the
* tree.
*/
static void print_tree_structure_flat(avltree_node_t *node, int level)
static void
print_tree_structure_flat(avltree_node_t *node, int level)
{
/*
* You can set the maximum level as high as you like.
130,6 → 133,7
for (i = 0; i < NODE_COUNT - 1; i++) {
avltree_nodes[i].par = &avltree_nodes[i + 1];
}
avltree_nodes[i].par = NULL;
/*
* Node keys which will be used for insertion. Up to NODE_COUNT size of
169,7 → 173,6
for (i = 21; i < NODE_COUNT; i++)
avltree_nodes[i].key = i * 3;
avltree_nodes[i].par = NULL;
first_free_node = &avltree_nodes[0];
}
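
Note: besides reflowing the printf() calls, this hunk marks print_tree_structure_flat() with __attribute__ ((used)), which forces GCC to emit the function and silences the unused-function diagnostic even though nothing in the test calls it. A minimal standalone sketch of the attribute (debug_dump is a hypothetical name, not part of this revision):

static void debug_dump(void) __attribute__ ((used));

/* Kept in the binary despite having no callers (e.g. for invocation
 * from a debugger); without the attribute, a -Wall -Werror build
 * would reject this static function via -Wunused-function. */
static void debug_dump(void)
{
}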
 
/branches/dynload/kernel/test/mm/slab2.c
216,7 → 216,7
printf("Running stress test with size %d\n", size);
condvar_initialize(&thread_starter);
mutex_initialize(&starter_mutex);
mutex_initialize(&starter_mutex, MUTEX_PASSIVE);
 
thr_cache = slab_cache_create("thread_cache", size, 0, NULL, NULL, 0);
semaphore_initialize(&thr_sem,0);
/branches/dynload/kernel/genarch/src/mm/as_ht.c
72,7 → 72,7
{
if (flags & FLAG_AS_KERNEL) {
hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations);
mutex_initialize(&page_ht_lock);
mutex_initialize(&page_ht_lock, MUTEX_PASSIVE);
}
return NULL;
}
/branches/dynload/kernel/generic/include/synch/mutex.h
39,20 → 39,26
#include <synch/semaphore.h>
#include <synch/synch.h>
 
typedef enum {
MUTEX_PASSIVE,
MUTEX_ACTIVE
} mutex_type_t;
 
typedef struct {
mutex_type_t type;
semaphore_t sem;
} mutex_t;
 
#define mutex_lock(mtx) \
#define mutex_lock(mtx) \
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
#define mutex_trylock(mtx) \
#define mutex_trylock(mtx) \
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING)
#define mutex_lock_timeout(mtx, usec) \
#define mutex_lock_timeout(mtx, usec) \
_mutex_lock_timeout((mtx), (usec), SYNCH_FLAGS_NON_BLOCKING)
 
extern void mutex_initialize(mutex_t *mtx);
extern int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags);
extern void mutex_unlock(mutex_t *mtx);
extern void mutex_initialize(mutex_t *, mutex_type_t);
extern int _mutex_lock_timeout(mutex_t *, uint32_t, int);
extern void mutex_unlock(mutex_t *);
 
#endif
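
Note: the header now distinguishes two mutex flavors, selected by the new second argument of mutex_initialize(). A short usage sketch against this API (the lock names and wrapper functions are illustrative only):

#include <synch/mutex.h>
#include <synch/synch.h>

static mutex_t sleep_lock;	/* MUTEX_PASSIVE: contenders sleep */
static mutex_t spin_lock;	/* MUTEX_ACTIVE: contenders busy-wait */

static void example_init(void)
{
	mutex_initialize(&sleep_lock, MUTEX_PASSIVE);
	mutex_initialize(&spin_lock, MUTEX_ACTIVE);
}

static void example_use(void)
{
	mutex_lock(&sleep_lock);
	/* ... critical section ... */
	mutex_unlock(&sleep_lock);

	/* Non-blocking attempt: fails immediately instead of waiting. */
	if (!SYNCH_FAILED(mutex_trylock(&spin_lock))) {
		/* ... critical section ... */
		mutex_unlock(&spin_lock);
	}
}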
 
/branches/dynload/kernel/generic/include/mm/slab.h
53,7 → 53,8
#define SLAB_INSIDE_SIZE (PAGE_SIZE >> 3)
 
/** Maximum wasted space we allow for cache */
#define SLAB_MAX_BADNESS(cache) (((unsigned int) PAGE_SIZE << (cache)->order) >> 2)
#define SLAB_MAX_BADNESS(cache) \
(((unsigned int) PAGE_SIZE << (cache)->order) >> 2)
 
/* slab_reclaim constants */
 
99,7 → 100,7
int flags;
 
/* Computed values */
uint8_t order; /**< Order of frames to be allocated */
uint8_t order; /**< Order of frames to be allocated */
unsigned int objects; /**< Number of objects that fit in */
 
/* Statistics */
121,14 → 122,13
slab_mag_cache_t *mag_cache;
} slab_cache_t;
 
extern slab_cache_t * slab_cache_create(char *name, size_t size, size_t align,
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
int flags);
extern void slab_cache_destroy(slab_cache_t *cache);
extern slab_cache_t *slab_cache_create(char *, size_t, size_t,
int (*)(void *, int), int (*)(void *), int);
extern void slab_cache_destroy(slab_cache_t *);
 
extern void * slab_alloc(slab_cache_t *cache, int flags);
extern void slab_free(slab_cache_t *cache, void *obj);
extern count_t slab_reclaim(int flags);
extern void * slab_alloc(slab_cache_t *, int);
extern void slab_free(slab_cache_t *, void *);
extern count_t slab_reclaim(int);
 
/* slab subsystem initialization */
extern void slab_cache_init(void);
138,9 → 138,9
extern void slab_print_list(void);
 
/* malloc support */
extern void * malloc(unsigned int size, int flags);
extern void * realloc(void *ptr, unsigned int size, int flags);
extern void free(void *ptr);
extern void *malloc(unsigned int, int);
extern void *realloc(void *, unsigned int, int);
extern void free(void *);
#endif
 
/** @}
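
Note: for orientation, this is how a client of the re-prototyped slab API fits together; the cache, type, and function names below are invented for illustration:

#include <mm/slab.h>

typedef struct {
	int id;
} example_t;			/* hypothetical object type */

static slab_cache_t *example_cache;

static void example_slab_usage(void)
{
	example_t *obj;

	/* No constructor/destructor, default alignment, no flags. */
	example_cache = slab_cache_create("example_t", sizeof(example_t),
	    0, NULL, NULL, 0);

	/* With no flags given, slab_alloc() always returns memory. */
	obj = slab_alloc(example_cache, 0);
	obj->id = 42;
	slab_free(example_cache, obj);

	slab_cache_destroy(example_cache);
}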
/branches/dynload/kernel/generic/src/synch/rwlock.c
82,7 → 82,7
*/
void rwlock_initialize(rwlock_t *rwl) {
spinlock_initialize(&rwl->lock, "rwlock_t");
mutex_initialize(&rwl->exclusive);
mutex_initialize(&rwl->exclusive, MUTEX_PASSIVE);
rwl->readers_in = 0;
}
 
/branches/dynload/kernel/generic/src/synch/mutex.c
38,42 → 38,54
#include <synch/mutex.h>
#include <synch/semaphore.h>
#include <synch/synch.h>
#include <debug.h>
 
/** Initialize mutex
/** Initialize mutex.
*
* Initialize mutex.
*
* @param mtx Mutex.
* @param mtx Mutex.
* @param type Type of the mutex.
*/
void mutex_initialize(mutex_t *mtx)
void mutex_initialize(mutex_t *mtx, mutex_type_t type)
{
mtx->type = type;
semaphore_initialize(&mtx->sem, 1);
}
 
/** Acquire mutex
/** Acquire mutex.
*
* Acquire mutex.
* Timeout mode and non-blocking mode can be requested.
*
* @param mtx Mutex.
* @param usec Timeout in microseconds.
* @param flags Specify mode of operation.
* @param mtx Mutex.
* @param usec Timeout in microseconds.
* @param flags Specify mode of operation.
*
* For exact description of possible combinations of
* usec and flags, see comment for waitq_sleep_timeout().
*
* @return See comment for waitq_sleep_timeout().
* @return See comment for waitq_sleep_timeout().
*/
int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags)
{
return _semaphore_down_timeout(&mtx->sem, usec, flags);
int rc;
 
if (mtx->type == MUTEX_PASSIVE) {
rc = _semaphore_down_timeout(&mtx->sem, usec, flags);
} else {
ASSERT(mtx->type == MUTEX_ACTIVE);
ASSERT(usec == SYNCH_NO_TIMEOUT);
ASSERT(!(flags & SYNCH_FLAGS_INTERRUPTIBLE));
do {
rc = semaphore_trydown(&mtx->sem);
} while (SYNCH_FAILED(rc) &&
!(flags & SYNCH_FLAGS_NON_BLOCKING));
}
 
return rc;
}
 
/** Release mutex
/** Release mutex.
*
* Release mutex.
*
* @param mtx Mutex.
* @param mtx Mutex.
*/
void mutex_unlock(mutex_t *mtx)
{
/branches/dynload/kernel/generic/src/synch/condvar.c
43,7 → 43,7
 
/** Initialize condition variable.
*
* @param cv Condition variable.
* @param cv Condition variable.
*/
void condvar_initialize(condvar_t *cv)
{
50,11 → 50,10
waitq_initialize(&cv->wq);
}
 
/**
* Signal the condition has become true
* to the first waiting thread by waking it up.
/** Signal the condition has become true to the first waiting thread by waking
* it up.
*
* @param cv Condition variable.
* @param cv Condition variable.
*/
void condvar_signal(condvar_t *cv)
{
61,11 → 60,10
waitq_wakeup(&cv->wq, WAKEUP_FIRST);
}
 
/**
* Signal the condition has become true
* to all waiting threads by waking them up.
/** Signal the condition has become true to all waiting threads by waking
* them up.
*
* @param cv Condition variable.
* @param cv Condition variable.
*/
void condvar_broadcast(condvar_t *cv)
{
74,17 → 72,17
 
/** Wait for the condition becoming true.
*
* @param cv Condition variable.
* @param mtx Mutex.
* @param usec Timeout value in microseconds.
* @param flags Select mode of operation.
* @param cv Condition variable.
* @param mtx Mutex.
* @param usec Timeout value in microseconds.
* @param flags Select mode of operation.
*
* For exact description of meaning of possible combinations
* of usec and flags, see comment for waitq_sleep_timeout().
* Note that when SYNCH_FLAGS_NON_BLOCKING is specified here,
* ESYNCH_WOULD_BLOCK is always returned.
* For exact description of meaning of possible combinations of usec and flags,
* see comment for waitq_sleep_timeout(). Note that when
* SYNCH_FLAGS_NON_BLOCKING is specified here, ESYNCH_WOULD_BLOCK is always
* returned.
*
* @return See comment for waitq_sleep_timeout().
* @return See comment for waitq_sleep_timeout().
*/
int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags)
{
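
Note: the hunks above only reflow the condvar documentation; the semantics are unchanged. As a reminder of the intended calling pattern, a hedged sketch of a timed wait under a (passive) mutex, where data_ready, cv and mtx are illustrative:

mutex_lock(&mtx);
while (!data_ready) {
	/* Give up after 10 ms. Per the comment above, passing
	 * SYNCH_FLAGS_NON_BLOCKING to a condvar wait would always
	 * return ESYNCH_WOULD_BLOCK, so a plain timeout is used. */
	int rc = _condvar_wait_timeout(&cv, &mtx, 10000,
	    SYNCH_FLAGS_NONE);
	if (SYNCH_FAILED(rc))
		break;
}
mutex_unlock(&mtx);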
/branches/dynload/kernel/generic/src/main/kinit.c
146,7 → 146,8
/*
* Create kernel console.
*/
t = thread_create(kconsole, (void *) "kconsole", TASK, 0, "kconsole", false);
t = thread_create(kconsole, (void *) "kconsole", TASK, 0, "kconsole",
false);
if (t)
thread_ready(t);
else
187,7 → 188,8
init.tasks[i].size);
if (rd != RE_OK)
printf("Init binary %" PRIc " not used, error code %d.\n", i, rd);
printf("Init binary %" PRIc " not used, error "
"code %d.\n", i, rd);
}
}
/branches/dynload/kernel/generic/src/main/main.c
131,9 → 131,11
 
/** Main kernel routine for bootstrap CPU.
*
* Initializes the kernel by bootstrap CPU.
* This function passes control directly to
* main_bsp_separated_stack().
* The code here still runs on the boot stack, which knows nothing about
* preemption counts. Because of that, this function cannot directly call
* functions that disable or enable preemption (e.g. spinlock_lock()). The
* primary task of this function is to calculate address of a new stack and
* switch to it.
*
* Assuming interrupts_disable().
*
140,8 → 142,6
*/
void main_bsp(void)
{
LOG();
config.cpu_count = 1;
config.cpu_active = 1;
173,13 → 173,6
if (config.stack_base < stack_safe)
config.stack_base = ALIGN_UP(stack_safe, PAGE_SIZE);
version_print();
LOG("\nconfig.base=%#" PRIp " config.kernel_size=%" PRIs
"\nconfig.stack_base=%#" PRIp " config.stack_size=%" PRIs,
config.base, config.kernel_size,
config.stack_base, config.stack_size);
context_save(&ctx);
context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
THREAD_STACK_SIZE);
195,9 → 188,18
*/
void main_bsp_separated_stack(void)
{
/* Keep this the first thing. */
the_initialize(THE);
 
LOG();
the_initialize(THE);
version_print();
LOG("\nconfig.base=%#" PRIp " config.kernel_size=%" PRIs
"\nconfig.stack_base=%#" PRIp " config.stack_size=%" PRIs,
config.base, config.kernel_size, config.stack_base,
config.stack_size);
 
/*
* kconsole data structures must be initialized very early
249,10 → 251,9
if (init.cnt > 0) {
count_t i;
for (i = 0; i < init.cnt; i++)
printf("init[%" PRIc "].addr=%#" PRIp
", init[%" PRIc "].size=%#" PRIs "\n",
i, init.tasks[i].addr,
i, init.tasks[i].size);
printf("init[%" PRIc "].addr=%#" PRIp ", init[%" PRIc
"].size=%#" PRIs "\n", i, init.tasks[i].addr,
i, init.tasks[i].size);
} else
printf("No init binaries found\n");
269,7 → 270,8
/*
* Create the first thread.
*/
thread_t *kinit_thread = thread_create(kinit, NULL, kernel, 0, "kinit", true);
thread_t *kinit_thread = thread_create(kinit, NULL, kernel, 0, "kinit",
true);
if (!kinit_thread)
panic("Can't create kinit thread\n");
LOG_EXEC(thread_ready(kinit_thread));
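
Note: the rewritten comment above main_bsp() states the constraint (boot stack, no preemption bookkeeping); the mechanism is the context_save()/context_set() pair visible in the hunk, completed by a context_restore() that falls outside it. Schematically, condensed from the diff with the restore step added per the usual pattern:

/* Still on the boot stack: compute the new stack and jump to
 * main_bsp_separated_stack() on it. */
context_save(&ctx);
context_set(&ctx, FADDR(main_bsp_separated_stack),
    config.stack_base, THREAD_STACK_SIZE);
context_restore(&ctx);	/* never returns here */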
/branches/dynload/kernel/generic/src/proc/scheduler.c
451,8 → 451,8
/*
* Entering state is unexpected.
*/
panic("tid%" PRIu64 ": unexpected state %s\n", THREAD->tid,
thread_states[THREAD->state]);
panic("tid%" PRIu64 ": unexpected state %s\n",
THREAD->tid, thread_states[THREAD->state]);
break;
}
 
504,9 → 504,9
THREAD->state = Running;
 
#ifdef SCHEDULER_VERBOSE
printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 ", nrdy=%ld)\n",
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
atomic_get(&CPU->nrdy));
printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64
", nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority,
THREAD->ticks, atomic_get(&CPU->nrdy));
#endif
 
/*
640,9 → 640,9
*/
spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, nrdy=%ld, "
"avg=%ld\n", CPU->id, t->tid, CPU->id,
atomic_get(&CPU->nrdy),
printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, "
"nrdy=%ld, avg=%ld\n", CPU->id, t->tid,
CPU->id, atomic_get(&CPU->nrdy),
atomic_get(&nrdy) / config.cpu_active);
#endif
t->flags |= THREAD_FLAG_STOLEN;
/branches/dynload/kernel/generic/src/proc/task.c
188,7 → 188,7
ipc_phone_connect(&ta->phones[0], ipc_phone_0);
atomic_set(&ta->active_calls, 0);
 
mutex_initialize(&ta->futexes_lock);
mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);
btree_create(&ta->futexes);
ipl = interrupts_disable();
/branches/dynload/kernel/generic/src/mm/slab.c
167,7 → 167,7
* Allocate frames for slab space and initialize
*
*/
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
{
void *data;
slab_t *slab;
179,7 → 179,7
if (!data) {
return NULL;
}
if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
slab = slab_alloc(slab_extern_cache, flags);
if (!slab) {
frame_free(KA2PA(data));
200,7 → 200,7
slab->cache = cache;
 
for (i = 0; i < cache->objects; i++)
*((int *) (slab->start + i*cache->size)) = i+1;
*((int *) (slab->start + i*cache->size)) = i + 1;
 
atomic_inc(&cache->allocated_slabs);
return slab;
239,8 → 239,7
*
* @return Number of freed pages
*/
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
slab_t *slab)
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
{
int freed = 0;
 
256,7 → 255,7
ASSERT(slab->available < cache->objects);
 
*((int *)obj) = slab->nextavail;
slab->nextavail = (obj - slab->start)/cache->size;
slab->nextavail = (obj - slab->start) / cache->size;
slab->available++;
 
/* Move it to correct list */
281,7 → 280,7
*
* @return Object address or null
*/
static void * slab_obj_create(slab_cache_t *cache, int flags)
static void *slab_obj_create(slab_cache_t *cache, int flags)
{
slab_t *slab;
void *obj;
301,7 → 300,8
return NULL;
spinlock_lock(&cache->slablock);
} else {
slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
slab = list_get_instance(cache->partial_slabs.next, slab_t,
link);
list_remove(&slab->link);
}
obj = slab->start + slab->nextavail * cache->size;
332,8 → 332,7
*
* @param first If true, return first, else last mag
*/
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
int first)
static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
{
slab_magazine_t *mag = NULL;
link_t *cur;
368,8 → 367,7
*
* @return Number of freed pages
*/
static count_t magazine_destroy(slab_cache_t *cache,
slab_magazine_t *mag)
static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
{
unsigned int i;
count_t frames = 0;
389,7 → 387,7
*
* Assume cpu_magazine lock is held
*/
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
{
slab_magazine_t *cmag, *lastmag, *newmag;
 
423,7 → 421,7
*
* @return Pointer to object or NULL if not available
*/
static void * magazine_obj_get(slab_cache_t *cache)
static void *magazine_obj_get(slab_cache_t *cache)
{
slab_magazine_t *mag;
void *obj;
458,7 → 456,7
* allocate new, exchange last & current
*
*/
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
{
slab_magazine_t *cmag,*lastmag,*newmag;
 
530,7 → 528,8
static unsigned int comp_objects(slab_cache_t *cache)
{
if (cache->flags & SLAB_CACHE_SLINSIDE)
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
cache->size;
else
return (PAGE_SIZE << cache->order) / cache->size;
}
557,22 → 556,20
ASSERT(_slab_initialized >= 2);
 
cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,0);
cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
0);
for (i = 0; i < config.cpu_count; i++) {
memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
spinlock_initialize(&cache->mag_cache[i].lock,
"slab_maglock_cpu");
}
}
 
/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
char *name,
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
int (*destructor)(void *obj),
int flags)
_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
int flags)
{
int pages;
ipl_t ipl;
595,7 → 592,7
list_initialize(&cache->magazines);
spinlock_initialize(&cache->slablock, "slab_lock");
spinlock_initialize(&cache->maglock, "slab_maglock");
if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
make_magcache(cache);
 
/* Compute slab sizes, object counts in slabs etc. */
608,7 → 605,7
if (pages == 1)
cache->order = 0;
else
cache->order = fnzb(pages-1)+1;
cache->order = fnzb(pages - 1) + 1;
 
while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
cache->order += 1;
629,18 → 626,16
}
 
/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
int (*destructor)(void *obj),
int flags)
slab_cache_t *
slab_cache_create(char *name, size_t size, size_t align,
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
int flags)
{
slab_cache_t *cache;
 
cache = slab_alloc(&slab_cache_cache, 0);
_slab_cache_create(cache, name, size, align, constructor, destructor,
flags);
flags);
return cache;
}
 
664,7 → 659,7
* endless loop
*/
magcount = atomic_get(&cache->magazine_counter);
while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
frames += magazine_destroy(cache,mag);
if (!(flags & SLAB_RECLAIM_ALL) && frames)
break;
717,8 → 712,8
_slab_reclaim(cache, SLAB_RECLAIM_ALL);
 
/* All slabs must be empty */
if (!list_empty(&cache->full_slabs) \
|| !list_empty(&cache->partial_slabs))
if (!list_empty(&cache->full_slabs) ||
!list_empty(&cache->partial_slabs))
panic("Destroying cache that is not empty.");
 
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
726,9 → 721,8
slab_free(&slab_cache_cache, cache);
}
 
/** Allocate new object from cache - if no flags given, always returns
memory */
void * slab_alloc(slab_cache_t *cache, int flags)
/** Allocate new object from cache - if no flags given, always returns memory */
void *slab_alloc(slab_cache_t *cache, int flags)
{
ipl_t ipl;
void *result = NULL;
757,9 → 751,8
 
ipl = interrupts_disable();
 
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
|| magazine_obj_put(cache, obj)) {
 
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
magazine_obj_put(cache, obj)) {
slab_obj_destroy(cache, obj, slab);
 
}
786,7 → 779,8
* memory allocation from interrupts can deadlock.
*/
 
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
for (cur = slab_cache_list.next; cur != &slab_cache_list;
cur = cur->next) {
cache = list_get_instance(cur, slab_cache_t, link);
frames += _slab_reclaim(cache, flags);
}
800,25 → 794,77
/* Print list of slabs */
void slab_print_list(void)
{
slab_cache_t *cache;
link_t *cur;
ipl_t ipl;
ipl = interrupts_disable();
spinlock_lock(&slab_cache_lock);
printf("slab name size pages obj/pg slabs cached allocated ctl\n");
printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");
for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
int skip = 0;
 
printf("slab name size pages obj/pg slabs cached allocated"
" ctl\n");
printf("---------------- -------- ------ ------ ------ ------ ---------"
" ---\n");
 
while (true) {
slab_cache_t *cache;
link_t *cur;
ipl_t ipl;
int i;
 
/*
* We must not hold the slab_cache_lock spinlock when printing
* the statistics. Otherwise we can easily deadlock if the print
* needs to allocate memory.
*
* Therefore, we walk through the slab cache list, skipping some
* amount of already processed caches during each iteration and
* gathering statistics about the first unprocessed cache. For
* the sake of printing the statistics, we release the
* slab_cache_lock and reacquire it afterwards. Then the walk
* starts again.
*
* This limits both the efficiency and also accuracy of the
* obtained statistics. The efficiency is decreased because the
* time complexity of the algorithm is quadratic instead of
* linear. The accuracy is impacted because we drop the lock
* after processing one cache. If there is someone else
* manipulating the cache list, we might omit an arbitrary
* number of caches or process one cache multiple times.
* However, we don't lose sleep over this algorithm, for it is only
* statistics.
*/
 
ipl = interrupts_disable();
spinlock_lock(&slab_cache_lock);
 
for (i = 0, cur = slab_cache_list.next;
i < skip && cur != &slab_cache_list;
i++, cur = cur->next)
;
 
if (cur == &slab_cache_list) {
spinlock_unlock(&slab_cache_lock);
interrupts_restore(ipl);
break;
}
 
skip++;
 
cache = list_get_instance(cur, slab_cache_t, link);
 
char *name = cache->name;
uint8_t order = cache->order;
size_t size = cache->size;
unsigned int objects = cache->objects;
long allocated_slabs = atomic_get(&cache->allocated_slabs);
long cached_objs = atomic_get(&cache->cached_objs);
long allocated_objs = atomic_get(&cache->allocated_objs);
int flags = cache->flags;
spinlock_unlock(&slab_cache_lock);
interrupts_restore(ipl);
printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
cache->name, cache->size, (1 << cache->order), cache->objects,
atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs),
atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
name, size, (1 << order), objects, allocated_slabs,
cached_objs, allocated_objs,
flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
}
spinlock_unlock(&slab_cache_lock);
interrupts_restore(ipl);
}
 
void slab_cache_init(void)
826,32 → 872,24
int i, size;
 
/* Initialize magazine cache */
_slab_cache_create(&mag_cache,
"slab_magazine",
sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
sizeof(uintptr_t),
NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
_slab_cache_create(&mag_cache, "slab_magazine",
sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
SLAB_CACHE_SLINSIDE);
/* Initialize slab_cache cache */
_slab_cache_create(&slab_cache_cache,
"slab_cache",
sizeof(slab_cache_cache),
sizeof(uintptr_t),
NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
_slab_cache_create(&slab_cache_cache, "slab_cache",
sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
/* Initialize external slab cache */
slab_extern_cache = slab_cache_create("slab_extern",
sizeof(slab_t),
0, NULL, NULL,
SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
/* Initialize structures for malloc */
for (i=0, size=(1 << SLAB_MIN_MALLOC_W);
i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
i++, size <<= 1) {
malloc_caches[i] = slab_cache_create(malloc_names[i],
size, 0,
NULL,NULL, SLAB_CACHE_MAGDEFERRED);
for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
i++, size <<= 1) {
malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}
#ifdef CONFIG_DEBUG
_slab_initialized = 1;
876,9 → 914,11
 
spinlock_lock(&slab_cache_lock);
for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
for (cur = slab_cache_list.next; cur != &slab_cache_list;
cur = cur->next){
s = list_get_instance(cur, slab_cache_t, link);
if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
SLAB_CACHE_MAGDEFERRED)
continue;
make_magcache(s);
s->flags &= ~SLAB_CACHE_MAGDEFERRED;
889,7 → 929,7
 
/**************************************/
/* kalloc/kfree functions */
void * malloc(unsigned int size, int flags)
void *malloc(unsigned int size, int flags)
{
ASSERT(_slab_initialized);
ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));
902,7 → 942,7
return slab_alloc(malloc_caches[idx], flags);
}
 
void * realloc(void *ptr, unsigned int size, int flags)
void *realloc(void *ptr, unsigned int size, int flags)
{
ASSERT(_slab_initialized);
ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
/branches/dynload/kernel/generic/src/mm/as.c
126,7 → 126,7
int rc;
 
link_initialize(&as->inactive_as_with_asid_link);
mutex_initialize(&as->lock);
mutex_initialize(&as->lock, MUTEX_PASSIVE);
rc = as_constructor_arch(as, flags);
168,7 → 168,7
#ifdef __OBJC__
as = [as_t new];
link_initialize(&as->inactive_as_with_asid_link);
mutex_initialize(&as->lock);
mutex_initialize(&as->lock, MUTEX_PASSIVE);
(void) as_constructor_arch(as, flags);
#else
as = (as_t *) slab_alloc(as_slab, 0);
312,7 → 312,7
a = (as_area_t *) malloc(sizeof(as_area_t), 0);
 
mutex_initialize(&a->lock);
mutex_initialize(&a->lock, MUTEX_PASSIVE);
a->as = as;
a->flags = flags;
694,7 → 694,7
sh_info = src_area->sh_info;
if (!sh_info) {
sh_info = (share_info_t *) malloc(sizeof(share_info_t), 0);
mutex_initialize(&sh_info->lock);
mutex_initialize(&sh_info->lock, MUTEX_PASSIVE);
sh_info->refcount = 2;
btree_create(&sh_info->pagemap);
src_area->sh_info = sh_info;
/branches/dynload/kernel/generic/src/mm/frame.c
58,6 → 58,8
#include <debug.h>
#include <adt/list.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <synch/condvar.h>
#include <arch/asm.h>
#include <arch.h>
#include <print.h>
103,6 → 105,14
 
static zones_t zones;
 
/*
* Synchronization primitives used to sleep when there is no memory
* available.
*/
mutex_t mem_avail_mtx;
condvar_t mem_avail_cv;
unsigned long mem_avail_frames = 0; /**< Number of available frames. */
unsigned long mem_avail_gen = 0; /**< Generation counter. */
 
/********************/
/* Helper functions */
129,11 → 139,9
return (frame - zone->frames);
}
 
/** Initialize frame structure
/** Initialize frame structure.
*
* Initialize frame structure.
*
* @param frame Frame structure to be initialized.
* @param frame Frame structure to be initialized.
*/
static void frame_initialize(frame_t *frame)
{
145,11 → 153,10
/* Zoneinfo functions */
/**********************/
 
/**
* Insert-sort zone into zones list
/** Insert-sort zone into zones list.
*
* @param newzone New zone to be inserted into zone list
* @return zone number on success, -1 on error
* @param newzone New zone to be inserted into zone list.
* @return Zone number on success, -1 on error.
*/
static int zones_add_zone(zone_t *newzone)
{
171,7 → 178,8
for (i = 0; i < zones.count; i++) {
/* Check for overflow */
z = zones.info[i];
if (overlaps(newzone->base, newzone->count, z->base, z->count)) {
if (overlaps(newzone->base, newzone->count, z->base,
z->count)) {
printf("Zones overlap!\n");
return -1;
}
193,16 → 201,16
}
 
/**
* Try to find a zone where can we find the frame
* Try to find a zone where we can find the frame.
*
* Assume interrupts are disabled.
*
* @param frame Frame number contained in zone
* @param pzone If not null, it is used as zone hint. Zone index
* is filled into the variable on success.
* @return Pointer to locked zone containing frame
* @param frame Frame number contained in zone.
* @param pzone If not null, it is used as zone hint. Zone index is
* filled into the variable on success.
* @return Pointer to locked zone containing frame.
*/
static zone_t * find_zone_and_lock(pfn_t frame, unsigned int *pzone)
static zone_t *find_zone_and_lock(pfn_t frame, unsigned int *pzone)
{
unsigned int i;
unsigned int hint = pzone ? *pzone : 0;
245,11 → 253,11
*
* Assume interrupts are disabled.
*
* @param order Size (2^order) of free space we are trying to find
* @param pzone Pointer to preferred zone or NULL, on return contains zone
* number
* @param order Size (2^order) of free space we are trying to find.
* @param pzone Pointer to preferred zone or NULL, on return contains
* zone number.
*/
static zone_t * find_free_zone_and_lock(uint8_t order, unsigned int *pzone)
static zone_t *find_free_zone_and_lock(uint8_t order, unsigned int *pzone)
{
unsigned int i;
zone_t *z;
283,12 → 291,13
/* Buddy system functions */
/**************************/
 
/** Buddy system find_block implementation
/** Buddy system find_block implementation.
*
* Find block that is parent of current list.
* That means go to lower addresses, until such block is found
*
* @param order - Order of parent must be different then this parameter!!
* @param order Order of parent must be different than this
* parameter!!
*/
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child,
uint8_t order)
321,12 → 330,12
printf("%" PRIi, index);
}
 
/** Buddy system find_buddy implementation
/** Buddy system find_buddy implementation.
*
* @param b Buddy system.
* @param block Block for which buddy should be found
* @param b Buddy system.
* @param block Block for which buddy should be found.
*
* @return Buddy for given block if found
* @return Buddy for given block if found.
*/
static link_t *zone_buddy_find_buddy(buddy_system_t *b, link_t *block)
{
345,9 → 354,11
 
ASSERT(is_left ^ is_right);
if (is_left) {
index = (frame_index(zone, frame)) + (1 << frame->buddy_order);
index = (frame_index(zone, frame)) +
(1 << frame->buddy_order);
} else { /* if (is_right) */
index = (frame_index(zone, frame)) - (1 << frame->buddy_order);
index = (frame_index(zone, frame)) -
(1 << frame->buddy_order);
}
if (frame_index_valid(zone, index)) {
360,14 → 371,15
return NULL;
}
 
/** Buddy system bisect implementation
/** Buddy system bisect implementation.
*
* @param b Buddy system.
* @param block Block to bisect
* @param b Buddy system.
* @param block Block to bisect.
*
* @return right block
* @return Right block.
*/
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t *block) {
static link_t *zone_buddy_bisect(buddy_system_t *b, link_t *block)
{
frame_t *frame_l, *frame_r;
 
frame_l = list_get_instance(block, frame_t, buddy_link);
376,13 → 388,14
return &frame_r->buddy_link;
}
 
/** Buddy system coalesce implementation
/** Buddy system coalesce implementation.
*
* @param b Buddy system.
* @param block_1 First block
* @param block_2 First block's buddy
* @param b Buddy system.
* @param block_1 First block.
* @param block_2 First block's buddy.
*
* @return Coalesced block (actually block that represents lower address)
* @return Coalesced block (actually block that represents lower
* address).
*/
static link_t *zone_buddy_coalesce(buddy_system_t *b, link_t *block_1,
link_t *block_2)
395,39 → 408,41
return frame1 < frame2 ? block_1 : block_2;
}
 
/** Buddy system set_order implementation
/** Buddy system set_order implementation.
*
* @param b Buddy system.
* @param block Buddy system block
* @param order Order to set
* @param b Buddy system.
* @param block Buddy system block.
* @param order Order to set.
*/
static void zone_buddy_set_order(buddy_system_t *b, link_t *block,
uint8_t order) {
uint8_t order)
{
frame_t *frame;
frame = list_get_instance(block, frame_t, buddy_link);
frame->buddy_order = order;
}
 
/** Buddy system get_order implementation
/** Buddy system get_order implementation.
*
* @param b Buddy system.
* @param block Buddy system block
* @param b Buddy system.
* @param block Buddy system block.
*
* @return Order of block
* @return Order of block.
*/
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t *block) {
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t *block)
{
frame_t *frame;
frame = list_get_instance(block, frame_t, buddy_link);
return frame->buddy_order;
}
 
/** Buddy system mark_busy implementation
/** Buddy system mark_busy implementation.
*
* @param b Buddy system
* @param block Buddy system block
*
* @param b Buddy system.
* @param block Buddy system block.
*/
static void zone_buddy_mark_busy(buddy_system_t *b, link_t * block) {
static void zone_buddy_mark_busy(buddy_system_t *b, link_t * block)
{
frame_t * frame;
 
frame = list_get_instance(block, frame_t, buddy_link);
434,13 → 449,13
frame->refcount = 1;
}
 
/** Buddy system mark_available implementation
/** Buddy system mark_available implementation.
*
* @param b Buddy system
* @param block Buddy system block
*
* @param b Buddy system.
* @param block Buddy system block.
*/
static void zone_buddy_mark_available(buddy_system_t *b, link_t *block) {
static void zone_buddy_mark_available(buddy_system_t *b, link_t *block)
{
frame_t *frame;
frame = list_get_instance(block, frame_t, buddy_link);
frame->refcount = 0;
462,15 → 477,15
/* Zone functions */
/******************/
 
/** Allocate frame in particular zone
/** Allocate frame in particular zone.
*
* Assume zone is locked
* Assume zone is locked.
* Panics if allocation is impossible.
*
* @param zone Zone to allocate from.
* @param order Allocate exactly 2^order frames.
* @param zone Zone to allocate from.
* @param order Allocate exactly 2^order frames.
*
* @return Frame index in zone
* @return Frame index in zone.
*
*/
static pfn_t zone_frame_alloc(zone_t *zone, uint8_t order)
496,12 → 511,12
return v;
}
 
/** Free frame from zone
/** Free frame from zone.
*
* Assume zone is locked
* Assume zone is locked.
*
* @param zone Pointer to zone from which the frame is to be freed
* @param frame_idx Frame index relative to zone
* @param zone Pointer to zone from which the frame is to be freed.
* @param frame_idx Frame index relative to zone.
*/
static void zone_frame_free(zone_t *zone, index_t frame_idx)
{
524,14 → 539,14
}
}
 
/** Return frame from zone */
static frame_t * zone_get_frame(zone_t *zone, index_t frame_idx)
/** Return frame from zone. */
static frame_t *zone_get_frame(zone_t *zone, index_t frame_idx)
{
ASSERT(frame_idx < zone->count);
return &zone->frames[frame_idx];
}
 
/** Mark frame in zone unavailable to allocation */
/** Mark frame in zone unavailable to allocation. */
static void zone_mark_unavailable(zone_t *zone, index_t frame_idx)
{
frame_t *frame;
544,18 → 559,21
&frame->buddy_link);
ASSERT(link);
zone->free_count--;
 
mutex_lock(&mem_avail_mtx);
mem_avail_frames--;
mutex_unlock(&mem_avail_mtx);
}
 
/**
* Join 2 zones
/** Join two zones.
*
* Expect zone_t *z to point to space at least zone_conf_size large
* Expect zone_t *z to point to space at least zone_conf_size large.
*
* Assume z1 & z2 are locked
* Assume z1 & z2 are locked.
*
* @param z Target zone structure pointer
* @param z1 Zone to merge
* @param z2 Zone to merge
* @param z Target zone structure pointer.
* @param z1 Zone to merge.
* @param z2 Zone to merge.
*/
static void _zone_merge(zone_t *z, zone_t *z1, zone_t *z2)
{
629,7 → 647,7
}
}
 
/** Return old configuration frames into the zone
/** Return old configuration frames into the zone.
*
* We have several cases
* - the conf. data is outside of zone -> exit, shall we call frame_free??
636,9 → 654,9
* - the conf. data was created by zone_create or
* updated with reduce_region -> free every frame
*
* @param newzone The actual zone where freeing should occur
* @param oldzone Pointer to old zone configuration data that should
* be freed from new zone
* @param newzone The actual zone where freeing should occur.
* @param oldzone Pointer to old zone configuration data that should
* be freed from new zone.
*/
static void return_config_frames(zone_t *newzone, zone_t *oldzone)
{
662,7 → 680,7
}
}
 
/** Reduce allocated block to count of order 0 frames
/** Reduce allocated block to count of order 0 frames.
*
* The allocated block need 2^order frames of space. Reduce all frames
* in block to order 0 and free the unneeded frames. This means, that
670,8 → 688,8
* you have to free every frame.
*
* @param zone
* @param frame_idx Index to block
* @param count Allocated space in block
* @param frame_idx Index to block.
* @param count Allocated space in block.
*/
static void zone_reduce_region(zone_t *zone, pfn_t frame_idx, count_t count)
{
698,7 → 716,7
}
}
 
/** Merge zones z1 and z2
/** Merge zones z1 and z2.
*
* - the zones must be 2 zones with no zone existing in between,
* which means that z2 = z1+1
722,7 → 740,7
if ((z1 >= zones.count) || (z2 >= zones.count))
goto errout;
/* We can join only 2 zones with none existing inbetween */
if (z2-z1 != 1)
if (z2 - z1 != 1)
goto errout;
 
zone1 = zones.info[z1];
773,8 → 791,7
interrupts_restore(ipl);
}
 
/**
* Merge all zones into one big zone
/** Merge all zones into one big zone.
*
* It is reasonable to do this on systems whose bios reports parts in chunks,
* so that we could have 1 zone (it's faster).
784,21 → 801,19
int count = zones.count;
 
while (zones.count > 1 && --count) {
zone_merge(0,1);
zone_merge(0, 1);
break;
}
}
 
/** Create frame zone
/** Create new frame zone.
*
* Create new frame zone.
* @param start Physical address of the first frame within the zone.
* @param count Count of frames in zone.
* @param z Address of configuration information of zone.
* @param flags Zone flags.
*
* @param start Physical address of the first frame within the zone.
* @param count Count of frames in zone
* @param z Address of configuration information of zone
* @param flags Zone flags.
*
* @return Initialized zone.
* @return Initialized zone.
*/
static void zone_construct(pfn_t start, count_t count, zone_t *z, int flags)
{
819,8 → 834,7
z->buddy_system = (buddy_system_t *)&z[1];
buddy_system_create(z->buddy_system, max_order,
&zone_buddy_system_operations,
(void *) z);
&zone_buddy_system_operations, (void *) z);
/* Allocate frames _after_ the conframe */
/* Check sizes */
837,10 → 851,10
}
}
 
/** Compute configuration data size for zone
/** Compute configuration data size for zone.
*
* @param count Size of zone in frames
* @return Size of zone configuration info (in bytes)
* @param count Size of zone in frames.
* @return Size of zone configuration info (in bytes).
*/
uintptr_t zone_conf_size(count_t count)
{
852,20 → 866,20
return size;
}
 
/** Create and add zone to system
/** Create and add zone to system.
*
* @param start First frame number (absolute)
* @param count Size of zone in frames
* @param confframe Where configuration frames are supposed to be.
* Automatically checks, that we will not disturb the
* kernel and possibly init.
* If confframe is given _outside_ this zone, it is expected,
* that the area is already marked BUSY and big enough
* to contain zone_conf_size() amount of data.
* If the confframe is inside the area, the zone free frame
* information is modified not to include it.
* @param start First frame number (absolute).
* @param count Size of zone in frames.
* @param confframe Where configuration frames are supposed to be.
* Automatically checks that we will not disturb the
* kernel and possibly init. If confframe is given
* _outside_ this zone, it is expected that the area is
* already marked BUSY and big enough to contain
* zone_conf_size() amount of data. If the confframe is
* inside the area, the zone free frame information is
* modified not to include it.
*
* @return Zone number or -1 on error
* @return Zone number or -1 on error.
*/
int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags)
{
884,8 → 898,8
* it does not span kernel & init
*/
confcount = SIZE2FRAMES(zone_conf_size(count));
if (confframe >= start && confframe < start+count) {
for (;confframe < start + count; confframe++) {
if (confframe >= start && confframe < start + count) {
for (; confframe < start + count; confframe++) {
addr = PFN2ADDR(confframe);
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(config.base), config.kernel_size))
919,11 → 933,16
if (znum == -1)
return -1;
 
mutex_lock(&mem_avail_mtx);
mem_avail_frames += count;
mutex_unlock(&mem_avail_mtx);
 
/* If confdata in zone, mark as unavailable */
if (confframe >= start && confframe < start + count)
for (i = confframe; i < confframe + confcount; i++) {
zone_mark_unavailable(z, i - z->base);
}
return znum;
}
 
930,7 → 949,7
/***************************************/
/* Frame functions */
 
/** Set parent of frame */
/** Set parent of frame. */
void frame_set_parent(pfn_t pfn, void *data, unsigned int hint)
{
zone_t *zone = find_zone_and_lock(pfn, &hint);
937,7 → 956,7
 
ASSERT(zone);
 
zone_get_frame(zone, pfn-zone->base)->parent = data;
zone_get_frame(zone, pfn - zone->base)->parent = data;
spinlock_unlock(&zone->lock);
}
 
955,19 → 974,20
 
/** Allocate power-of-two frames of physical memory.
*
* @param order Allocate exactly 2^order frames.
* @param flags Flags for host zone selection and address processing.
* @param pzone Preferred zone
* @param order Allocate exactly 2^order frames.
* @param flags Flags for host zone selection and address processing.
* @param pzone Preferred zone.
*
* @return Physical address of the allocated frame.
* @return Physical address of the allocated frame.
*
*/
void * frame_alloc_generic(uint8_t order, int flags, unsigned int *pzone)
void *frame_alloc_generic(uint8_t order, int flags, unsigned int *pzone)
{
ipl_t ipl;
int freed;
pfn_t v;
zone_t *zone;
unsigned long gen = 0;
loop:
ipl = interrupts_disable();
991,14 → 1011,41
}
if (!zone) {
/*
* TODO: Sleep until frames are available again.
* Sleep until some frames are available again.
*/
interrupts_restore(ipl);
 
if (flags & FRAME_ATOMIC)
if (flags & FRAME_ATOMIC) {
interrupts_restore(ipl);
return 0;
}
panic("Sleep not implemented.\n");
#ifdef CONFIG_DEBUG
unsigned long avail;
 
mutex_lock(&mem_avail_mtx);
avail = mem_avail_frames;
mutex_unlock(&mem_avail_mtx);
 
printf("Thread %" PRIu64 " waiting for %u frames, "
"%u available.\n", THREAD->tid, 1ULL << order, avail);
#endif
 
mutex_lock(&mem_avail_mtx);
while ((mem_avail_frames < (1ULL << order)) ||
gen == mem_avail_gen)
condvar_wait(&mem_avail_cv, &mem_avail_mtx);
gen = mem_avail_gen;
mutex_unlock(&mem_avail_mtx);
 
#ifdef CONFIG_DEBUG
mutex_lock(&mem_avail_mtx);
avail = mem_avail_frames;
mutex_unlock(&mem_avail_mtx);
 
printf("Thread %" PRIu64 " woken up, %u frames available.\n",
THREAD->tid, avail);
#endif
 
interrupts_restore(ipl);
goto loop;
}
1006,6 → 1053,11
v += zone->base;
 
spinlock_unlock(&zone->lock);
mutex_lock(&mem_avail_mtx);
mem_avail_frames -= (1ULL << order);
mutex_unlock(&mem_avail_mtx);
 
interrupts_restore(ipl);
 
if (flags & FRAME_KA)
1019,7 → 1071,7
* Decrement frame reference count.
* If it drops to zero, move the frame structure to free list.
*
* @param Frame Physical Address of of the frame to be freed.
* @param frame Physical address of the frame to be freed.
*/
void frame_free(uintptr_t frame)
{
1028,16 → 1080,26
pfn_t pfn = ADDR2PFN(frame);
 
ipl = interrupts_disable();
 
/*
* First, find host frame zone for addr.
*/
zone = find_zone_and_lock(pfn,NULL);
zone = find_zone_and_lock(pfn, NULL);
ASSERT(zone);
zone_frame_free(zone, pfn-zone->base);
zone_frame_free(zone, pfn - zone->base);
spinlock_unlock(&zone->lock);
/*
* Signal that some memory has been freed.
*/
mutex_lock(&mem_avail_mtx);
mem_avail_frames++;
mem_avail_gen++;
condvar_broadcast(&mem_avail_cv);
mutex_unlock(&mem_avail_mtx);
 
interrupts_restore(ipl);
}
 
1046,7 → 1108,7
* Find respective frame structure for supplied PFN and
* increment frame reference count.
*
* @param pfn Frame number of the frame to be freed.
* @param pfn Frame number of the frame to be referenced.
*/
void frame_reference_add(pfn_t pfn)
{
1059,10 → 1121,10
/*
* First, find host frame zone for addr.
*/
zone = find_zone_and_lock(pfn,NULL);
zone = find_zone_and_lock(pfn, NULL);
ASSERT(zone);
frame = &zone->frames[pfn-zone->base];
frame = &zone->frames[pfn - zone->base];
frame->refcount++;
spinlock_unlock(&zone->lock);
1069,7 → 1131,7
interrupts_restore(ipl);
}
 
/** Mark given range unavailable in frame zones */
/** Mark given range unavailable in frame zones. */
void frame_mark_unavailable(pfn_t start, count_t count)
{
unsigned int i;
1086,15 → 1148,14
}
}
 
/** Initialize physical memory management
*
* Initialize physical memory managemnt.
*/
/** Initialize physical memory management. */
void frame_init(void)
{
if (config.cpu_active == 1) {
zones.count = 0;
spinlock_initialize(&zones.lock, "zones.lock");
mutex_initialize(&mem_avail_mtx, MUTEX_ACTIVE);
condvar_initialize(&mem_avail_cv);
}
/* Tell the architecture to create some memory */
frame_arch_init();
1122,10 → 1183,9
}
 
 
/** Return total size of all zones
*
*/
uint64_t zone_total_size(void) {
/** Return total size of all zones. */
uint64_t zone_total_size(void)
{
zone_t *zone = NULL;
unsigned int i;
ipl_t ipl;
1147,12 → 1207,9
return total;
}
 
 
 
/** Prints list of zones
*
*/
void zone_print_list(void) {
/** Prints list of zones. */
void zone_print_list(void)
{
zone_t *zone = NULL;
unsigned int i;
ipl_t ipl;
1175,13 → 1232,14
spinlock_lock(&zone->lock);
 
#ifdef __32_BITS__
printf("%-2u %10p %12" PRIc " %12" PRIc "\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
printf("%-2u %10p %12" PRIc " %12" PRIc "\n",
i, PFN2ADDR(zone->base), zone->free_count,
zone->busy_count);
#endif
 
#ifdef __64_BITS__
printf("%-2u %18p %12" PRIc " %12" PRIc "\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
printf("%-2u %18p %12" PRIc " %12" PRIc "\n", i,
PFN2ADDR(zone->base), zone->free_count, zone->busy_count);
#endif
spinlock_unlock(&zone->lock);
1193,9 → 1251,10
 
/** Prints zone details.
*
* @param num Zone base address or zone number.
* @param num Zone base address or zone number.
*/
void zone_print_one(unsigned int num) {
void zone_print_one(unsigned int num)
{
zone_t *zone = NULL;
ipl_t ipl;
unsigned int i;
1218,11 → 1277,11
printf("Memory zone information\n");
printf("Zone base address: %p\n", PFN2ADDR(zone->base));
printf("Zone size: %" PRIc " frames (%" PRIs " KB)\n", zone->count,
SIZE2KB(FRAMES2SIZE(zone->count)));
printf("Allocated space: %" PRIc " frames (%" PRIs " KB)\n", zone->busy_count,
SIZE2KB(FRAMES2SIZE(zone->busy_count)));
printf("Available space: %" PRIc " frames (%" PRIs " KB)\n", zone->free_count,
SIZE2KB(FRAMES2SIZE(zone->free_count)));
SIZE2KB(FRAMES2SIZE(zone->count)));
printf("Allocated space: %" PRIc " frames (%" PRIs " KB)\n",
zone->busy_count, SIZE2KB(FRAMES2SIZE(zone->busy_count)));
printf("Available space: %" PRIc " frames (%" PRIs " KB)\n",
zone->free_count, SIZE2KB(FRAMES2SIZE(zone->free_count)));
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
spinlock_unlock(&zone->lock);
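
Note: the new mem_avail_mtx / mem_avail_cv / mem_avail_gen trio is what replaces the old "Sleep not implemented" panic: frame_free() bumps the generation counter and broadcasts, while a sleeper in frame_alloc_generic() waits until enough frames exist and at least one free has happened since its last attempt. Condensed from the hunks above (wait_for_frames and post_free are illustrative wrappers, not functions from this revision):

/* Waiter side, cf. frame_alloc_generic(), where gen persists
 * across allocation retries: */
static void wait_for_frames(uint8_t order)
{
	unsigned long gen = 0;

	mutex_lock(&mem_avail_mtx);
	while ((mem_avail_frames < (1ULL << order)) ||
	    gen == mem_avail_gen)
		condvar_wait(&mem_avail_cv, &mem_avail_mtx);
	gen = mem_avail_gen;
	mutex_unlock(&mem_avail_mtx);
}

/* Poster side, cf. frame_free(): wake every sleeper so each can
 * re-evaluate its own condition. */
static void post_free(void)
{
	mutex_lock(&mem_avail_mtx);
	mem_avail_frames++;
	mem_avail_gen++;
	condvar_broadcast(&mem_avail_cv);
	mutex_unlock(&mem_avail_mtx);
}

Note that frame_init() creates mem_avail_mtx as MUTEX_ACTIVE, presumably because it is taken with interrupts disabled, where sleeping on a passive mutex is not an option.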
/branches/dynload/kernel/generic/src/ipc/ipc.c
87,7 → 87,8
call_t *call;
 
call = slab_alloc(ipc_call_slab, flags);
_ipc_call_init(call);
if (call)
_ipc_call_init(call);
 
return call;
}
160,7 → 161,7
*/
void ipc_phone_init(phone_t *phone)
{
mutex_initialize(&phone->lock);
mutex_initialize(&phone->lock, MUTEX_PASSIVE);
phone->callee = NULL;
phone->state = IPC_PHONE_FREE;
atomic_set(&phone->active_calls, 0);
/branches/dynload/kernel/Makefile
43,11 → 43,11
-DKERNEL
 
GCC_CFLAGS = -I$(INCLUDES) -O$(OPTIMIZATION) \
-fno-builtin -fomit-frame-pointer -Wall -Wextra -Wno-unused-parameter -Wmissing-prototypes -Werror \
-fno-builtin -Wall -Wextra -Wno-unused-parameter -Wmissing-prototypes -Werror \
-nostdlib -nostdinc
 
ICC_CFLAGS = -I$(INCLUDES) -O$(OPTIMIZATION) \
-fno-builtin -fomit-frame-pointer -Wall -Wmissing-prototypes -Werror \
-fno-builtin -Wall -Wmissing-prototypes -Werror \
-nostdlib -nostdinc \
-wd170
 
393,5 → 393,15
%.o: %.s
$(AS) $(AFLAGS) $< -o $@
 
#
# The FPU tests are the only objects for which we allow the compiler to generate
# FPU instructions.
#
test/fpu/%.o: test/fpu/%.c
$(CC) $(DEFS) $(CFLAGS) $(EXTRA_FLAGS) -c $< -o $@
 
#
# Ordinary objects.
#
%.o: %.c
$(CC) $(DEFS) $(CFLAGS) $(EXTRA_FLAGS) -c $< -o $@
$(CC) $(DEFS) $(CFLAGS) $(EXTRA_FLAGS) $(FPU_NO_CFLAGS) -c $< -o $@
/branches/dynload/kernel/arch/ppc32/src/asm.S
312,4 → 312,6
 
memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
b memcpy_from_uspace_failover_address
# return zero, failure
xor r3, r3, r3
blr
/branches/dynload/kernel/arch/amd64/include/atomic.h
41,17 → 41,17
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock incq %0\n" : "=m" (val->count));
asm volatile ("lock incq %0\n" : "+m" (val->count));
#else
asm volatile ("incq %0\n" : "=m" (val->count));
asm volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock decq %0\n" : "=m" (val->count));
asm volatile ("lock decq %0\n" : "+m" (val->count));
#else
asm volatile ("decq %0\n" : "=m" (val->count));
asm volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
61,7 → 61,7
 
asm volatile (
"lock xaddq %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
 
return r;
73,14 → 73,14
asm volatile (
"lock xaddq %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
return r;
}
 
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
 
static inline uint64_t test_and_set(atomic_t *val) {
uint64_t v;
88,7 → 88,7
asm volatile (
"movq $1, %0\n"
"xchgq %0, %1\n"
: "=r" (v),"=m" (val->count)
: "=r" (v), "+m" (val->count)
);
return v;
102,20 → 102,20
 
preemption_disable();
asm volatile (
"0:;"
"0:\n"
#ifdef CONFIG_HT
"pause;"
"pause\n"
#endif
"mov %0, %1;"
"testq %1, %1;"
"jnz 0b;" /* Lightweight looping on locked spinlock */
"mov %0, %1\n"
"testq %1, %1\n"
"jnz 0b\n" /* Lightweight looping on locked spinlock */
"incq %1;" /* now use the atomic operation */
"xchgq %0, %1;"
"testq %1, %1;"
"jnz 0b;"
: "=m"(val->count),"=r"(tmp)
);
"incq %1\n" /* now use the atomic operation */
"xchgq %0, %1\n"
"testq %1, %1\n"
"jnz 0b\n"
: "+m" (val->count), "=r"(tmp)
);
/*
* Prevent critical section code from bleeding out this way up.
*/
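
Note: the recurring "=m" → "+m" change in these atomic operations is a correctness fix, not cosmetics. Instructions such as lock incq read, modify and write their memory operand, while "=m" declares it write-only and so lets GCC assume the previous value is dead. A minimal illustration (example_atomic_t stands in for atomic_t; uint64_t comes from the kernel's arch types):

typedef struct {
	volatile uint64_t count;
} example_atomic_t;

/* Wrong: write-only constraint on a read-modify-write operand. */
static inline void inc_broken(example_atomic_t *val)
{
	asm volatile ("lock incq %0\n" : "=m" (val->count));
}

/* Right: "+m" declares the operand as both read and written. */
static inline void inc_fixed(example_atomic_t *val)
{
	asm volatile ("lock incq %0\n" : "+m" (val->count));
}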
/branches/dynload/kernel/arch/amd64/Makefile.inc
35,6 → 35,7
TARGET = amd64-linux-gnu
TOOLCHAIN_DIR = /usr/local/amd64
 
FPU_NO_CFLAGS = -mno-sse -mno-sse2
CMN1 = -m64 -mcmodel=kernel -mno-red-zone -fno-unwind-tables
GCC_CFLAGS += $(CMN1)
ICC_CFLAGS += $(CMN1)
/branches/dynload/kernel/arch/mips32/include/atomic.h
63,7 → 63,7
" sc %0, %1\n"
" beq %0, %4, 1b\n" /* if the atomic operation failed, try again */
" nop\n"
: "=&r" (tmp), "=m" (val->count), "=&r" (v)
: "=&r" (tmp), "+m" (val->count), "=&r" (v)
: "r" (i), "i" (0)
);
 
/branches/dynload/kernel/arch/ia32/include/atomic.h
41,17 → 41,17
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock incl %0\n" : "=m" (val->count));
asm volatile ("lock incl %0\n" : "+m" (val->count));
#else
asm volatile ("incl %0\n" : "=m" (val->count));
asm volatile ("incl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock decl %0\n" : "=m" (val->count));
asm volatile ("lock decl %0\n" : "+m" (val->count));
#else
asm volatile ("decl %0\n" : "=m" (val->count));
asm volatile ("decl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
61,7 → 61,7
 
asm volatile (
"lock xaddl %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
 
return r;
73,14 → 73,14
asm volatile (
"lock xaddl %1, %0\n"
: "=m" (val->count), "+r"(r)
: "+m" (val->count), "+r"(r)
);
return r;
}
 
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
 
static inline uint32_t test_and_set(atomic_t *val) {
uint32_t v;
88,7 → 88,7
asm volatile (
"movl $1, %0\n"
"xchgl %0, %1\n"
: "=r" (v),"=m" (val->count)
: "=r" (v),"+m" (val->count)
);
return v;
101,20 → 101,20
 
preemption_disable();
asm volatile (
"0:;"
"0:\n"
#ifdef CONFIG_HT
"pause;" /* Pentium 4's HT love this instruction */
"pause\n" /* Pentium 4's HT love this instruction */
#endif
"mov %0, %1;"
"testl %1, %1;"
"jnz 0b;" /* Lightweight looping on locked spinlock */
"mov %0, %1\n"
"testl %1, %1\n"
"jnz 0b\n" /* lightweight looping on locked spinlock */
"incl %1;" /* now use the atomic operation */
"xchgl %0, %1;"
"testl %1, %1;"
"jnz 0b;"
: "=m"(val->count),"=r"(tmp)
);
"incl %1\n" /* now use the atomic operation */
"xchgl %0, %1\n"
"testl %1, %1\n"
"jnz 0b\n"
: "+m" (val->count), "=r"(tmp)
);
/*
* Prevent critical section code from bleeding out this way up.
*/
/branches/dynload/kernel/arch/ia32/Makefile.inc
46,7 → 46,8
#
 
ifeq ($(MACHINE),athlon-xp)
CMN2 = -march=athlon-xp -mmmx -msse -m3dnow
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-3dnow
CMN2 = -march=athlon-xp
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=ssea
55,7 → 56,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),athlon-mp)
CMN2 = -march=athlon-mp -mmmx -msse -m3dnow
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-3dnow
CMN2 = -march=athlon-mp
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += xarch=ssea
63,7 → 65,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),pentium3)
CMN2 = -march=pentium3 -mmmx -msse
FPU_NO_CFLAGS = -mno-mmx -mno-sse
CMN2 = -march=pentium3
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=sse
71,7 → 74,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),core)
CMN2 = -march=prescott -mfpmath=sse -mmmx -msse -msse2 -msse3
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-sse2 -mno-sse3
CMN2 = -march=prescott
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=sse3
78,7 → 82,8
DEFS += -DCONFIG_FENCES_P4
endif
ifeq ($(MACHINE),pentium4)
GCC_CFLAGS += -march=pentium4 -mfpmath=sse -mmmx -msse -msse2
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-sse2
GCC_CFLAGS += -march=pentium4
ICC_CFLAGS += -march=pentium4
SUNCC_CFLAGS += -xarch=sse2
DEFS += -DCONFIG_FENCES_P4