Subversion Repositories HelenOS

Compare Revisions

Rev 2725 → Rev 2745
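A note on the pattern below (an annotation, not part of the revision itself): nearly every hunk replaces a signed loop index with an unsigned one. The bounds these indices are compared against (node->keys, config.cpu_count, mag->busy, count_t values) are unsigned, and C converts the signed operand of a mixed comparison to unsigned, which both triggers -Wsign-compare warnings and misbehaves for negative values. A minimal standalone sketch of the hazard, using made-up values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int count = 5;  /* stands in for an unsigned bound such as node->keys */
            int i = -1;              /* a signed index that went negative */

            /* i is converted to unsigned: (unsigned int) -1 == UINT_MAX, so the test fails */
            if (i < count)
                    puts("in range");
            else
                    puts("-1 treated as a huge unsigned value");

            /* with matching types the comparison is safe and warning-free */
            for (unsigned int j = 0; j < count; j++)
                    printf("%u\n", j);

            return 0;
    }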

/trunk/kernel/generic/src/mm/slab.c
172,7 → 172,7
 	void *data;
 	slab_t *slab;
 	size_t fsize;
-	int i;
+	unsigned int i;
 	unsigned int zone = 0;
 
 	data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);

191,8 → 191,8
 	}
 	/* Fill in slab structures */
-	for (i=0; i < (1 << cache->order); i++)
-		frame_set_parent(ADDR2PFN(KA2PA(data))+i, slab, zone);
+	for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
+		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
 
 	slab->start = data;
 	slab->available = cache->objects;
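Aside on the cast added above: the constant 1 has type int, so 1 << cache->order is a signed expression, and comparing it against the now-unsigned i would reintroduce a mixed-signedness comparison. Casting the operand first keeps the whole loop in unsigned arithmetic. A small sketch, with a hypothetical order value standing in for cache->order:

    #include <stdio.h>

    int main(void)
    {
            unsigned int i;
            unsigned int order = 3;  /* hypothetical; stands in for cache->order */

            /* ((unsigned int) 1 << order) has unsigned type, matching i */
            for (i = 0; i < ((unsigned int) 1 << order); i++)
                    printf("frame %u\n", i);  /* 8 iterations for order == 3 */

            return 0;
    }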
199,7 → 199,7
 	slab->nextavail = 0;
 	slab->cache = cache;
 
-	for (i=0; i<cache->objects;i++)
+	for (i = 0; i < cache->objects; i++)
 		*((int *) (slab->start + i*cache->size)) = i+1;
 
 	atomic_inc(&cache->allocated_slabs);

371,10 → 371,10
 static count_t magazine_destroy(slab_cache_t *cache,
     slab_magazine_t *mag)
 {
-	int i;
+	unsigned int i;
 	count_t frames = 0;
 
-	for (i=0;i < mag->busy; i++) {
+	for (i = 0; i < mag->busy; i++) {
 		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
 		atomic_dec(&cache->cached_objs);
 	}

527,7 → 527,7
 /* Slab cache functions */
 
 /** Return number of objects that fit in certain cache size */
-static int comp_objects(slab_cache_t *cache)
+static unsigned int comp_objects(slab_cache_t *cache)
 {
 	if (cache->flags & SLAB_CACHE_SLINSIDE)
 		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;

536,16 → 536,16
 }
 
 /** Return wasted space in slab */
-static int badness(slab_cache_t *cache)
+static unsigned int badness(slab_cache_t *cache)
 {
-	int objects;
-	int ssize;
+	unsigned int objects;
+	unsigned int ssize;
 
 	objects = comp_objects(cache);
 	ssize = PAGE_SIZE << cache->order;
 	if (cache->flags & SLAB_CACHE_SLINSIDE)
 		ssize -= sizeof(slab_t);
-	return ssize - objects*cache->size;
+	return ssize - objects * cache->size;
 }
 
 /**

553,16 → 553,15
  */
 static void make_magcache(slab_cache_t *cache)
 {
-	int i;
+	unsigned int i;
 
 	ASSERT(_slab_initialized >= 2);
 
 	cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
-	for (i=0; i < config.cpu_count; i++) {
+	for (i = 0; i < config.cpu_count; i++) {
 		memsetb((uintptr_t)&cache->mag_cache[i],
 		    sizeof(cache->mag_cache[i]), 0);
-		spinlock_initialize(&cache->mag_cache[i].lock,
-		    "slab_maglock_cpu");
+		spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
 	}
 }

654,7 → 653,7
  */
 static count_t _slab_reclaim(slab_cache_t *cache, int flags)
 {
-	int i;
+	unsigned int i;
 	slab_magazine_t *mag;
 	count_t frames = 0;
 	int magcount;
675,7 → 674,7
 	if (flags & SLAB_RECLAIM_ALL) {
-		/* Free cpu-bound magazines */
-		for (i=0; i<config.cpu_count; i++) {
+		/* Destroy CPU magazines */
+		for (i = 0; i < config.cpu_count; i++) {
 			spinlock_lock(&cache->mag_cache[i].lock);
 
 			mag = cache->mag_cache[i].current;
/trunk/kernel/generic/src/mm/tlb.c
81,7 → 81,7
 void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
     uintptr_t page, count_t count)
 {
-	int i;
+	unsigned int i;
 
 	CPU->tlb_active = 0;
 	spinlock_lock(&tlblock);

144,7 → 144,7
 	asid_t asid;
 	uintptr_t page;
 	count_t count;
-	int i;
+	unsigned int i;
 
 	ASSERT(CPU);
/trunk/kernel/generic/src/mm/backend_anon.c
98,7 → 98,7
 	    ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
 	if (!frame) {
 		bool allocate = true;
-		int i;
+		unsigned int i;
 
 		/*
 		 * Zero can be returned as a valid frame address.

193,13 → 193,13
 	for (cur = area->used_space.leaf_head.next;
 	    cur != &area->used_space.leaf_head; cur = cur->next) {
 		btree_node_t *node;
-		int i;
+		unsigned int i;
 
 		node = list_get_instance(cur, btree_node_t, leaf_link);
 		for (i = 0; i < node->keys; i++) {
 			uintptr_t base = node->key[i];
 			count_t count = (count_t) node->value[i];
-			int j;
+			unsigned int j;
 
 			for (j = 0; j < count; j++) {
 				pte_t *pte;
/trunk/kernel/generic/src/mm/as.c
431,7 → 431,7
 			uintptr_t b = node->key[node->keys - 1];
 			count_t c =
 			    (count_t) node->value[node->keys - 1];
-			int i = 0;
+			unsigned int i = 0;
 
 			if (overlaps(b, c * PAGE_SIZE, area->base,
 			    pages * PAGE_SIZE)) {

561,7 → 561,7
 	for (cur = area->used_space.leaf_head.next;
 	    cur != &area->used_space.leaf_head; cur = cur->next) {
 		btree_node_t *node;
-		int i;
+		unsigned int i;
 
 		node = list_get_instance(cur, btree_node_t, leaf_link);
 		for (i = 0; i < node->keys; i++) {

1097,7 → 1097,7
 {
 	as_area_t *a;
 	btree_node_t *leaf, *lnode;
-	int i;
+	unsigned int i;
 
 	a = (as_area_t *) btree_search(&as->as_area_btree, va, &leaf);
 	if (a) {

1155,7 → 1155,7
 {
 	as_area_t *a;
 	btree_node_t *leaf, *node;
-	int i;
+	unsigned int i;
 
 	/*
 	 * We don't want any area to have conflicts with NULL page.

1264,7 → 1264,7
 {
 	btree_node_t *leaf, *node;
 	count_t pages;
-	int i;
+	unsigned int i;
 
 	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
 	ASSERT(count);

1546,7 → 1546,7
 {
 	btree_node_t *leaf, *node;
 	count_t pages;
-	int i;
+	unsigned int i;
 
 	ASSERT(page == ALIGN_DOWN(page, PAGE_SIZE));
 	ASSERT(count);

1734,7 → 1734,7
 	for (cur = sh_info->pagemap.leaf_head.next;
 	    cur != &sh_info->pagemap.leaf_head; cur = cur->next) {
 		btree_node_t *node;
-		int i;
+		unsigned int i;
 
 		node = list_get_instance(cur, btree_node_t, leaf_link);
 		for (i = 0; i < node->keys; i++)

1795,7 → 1795,7
 		node = list_get_instance(cur, btree_node_t, leaf_link);
-		int i;
+		unsigned int i;
 		for (i = 0; i < node->keys; i++) {
 			as_area_t *area = node->value[i];
/trunk/kernel/generic/src/mm/frame.c
120,7 → 120,7
 
 static inline int frame_index_valid(zone_t *zone, index_t index)
 {
-	return (index >= 0) && (index < zone->count);
+	return (index < zone->count);
 }
 
 /** Compute pfn_t from frame_t pointer & zone pointer */
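Aside on the check dropped above: index_t is an unsigned type in HelenOS (the removal of the check would otherwise break it), so index >= 0 is always true; the subexpression changes nothing and provokes "comparison is always true" warnings (e.g. GCC's -Wtype-limits). The bound test alone is equivalent. A sketch with a stand-in typedef; the real one lives in the per-arch type headers:

    typedef unsigned long index_t;  /* stand-in for the arch-specific typedef */

    static inline int frame_index_valid_sketch(index_t index, index_t count)
    {
            /* for an unsigned index, (index >= 0) is tautologically true,
             * so the upper-bound test alone is equivalent */
            return index < count;
    }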
210,7 → 210,7
 	spinlock_lock(&zones.lock);
 
-	if (hint >= zones.count || hint < 0)
+	if (hint >= zones.count)
 		hint = 0;
 	i = hint;

719,7 → 719,7
 	ipl = interrupts_disable();
 	spinlock_lock(&zones.lock);
 
-	if (z1 < 0 || z1 >= zones.count || z2 < 0 || z2 >= zones.count)
+	if ((z1 >= zones.count) || (z2 >= zones.count))
 		goto errout;
 	/* We can join only 2 zones with none existing inbetween */
 	if (z2-z1 != 1)
/trunk/kernel/generic/src/mm/backend_elf.c
103,7 → 103,7
 	frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
 	    ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
 	if (!frame) {
-		int i;
+		unsigned int i;
 
 		/*
 		 * Workaround for valid NULL address.

290,7 → 290,7
 	mutex_lock(&area->sh_info->lock);
 	for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
 	    cur = cur->next) {
-		int i;
+		unsigned int i;
 
 		node = list_get_instance(cur, btree_node_t, leaf_link);

297,7 → 297,7
 		for (i = 0; i < node->keys; i++) {
 			uintptr_t base = node->key[i];
 			count_t count = (count_t) node->value[i];
-			int j;
+			unsigned int j;
 
 			/*
 			 * Skip read-only areas of used space that are backed