Subversion Repositories HelenOS

Compare Revisions

Rev 821 → Rev 822

/kernel/trunk/test/mm/falloc2/test.c
55,7 → 55,7
__u8 val = THREAD->tid % THREADS;
index_t k;
__address * frames = (__address *) malloc(MAX_FRAMES * sizeof(__address));
__address * frames = (__address *) malloc(MAX_FRAMES * sizeof(__address), FRAME_ATOMIC);
ASSERT(frames != NULL);
 
for (run = 0; run < THREAD_RUNS; run++) {
/kernel/trunk/doc/mm
65,3 → 65,22
implied hardware support for hierarchical page tables, i.e. ia64 and sparc64.
There is only one global page hash table in the system shared by all address
spaces.
 
2.1 General allocator
 
The 'malloc' function accepts flags as its second argument. The flags are
passed directly to the underlying frame_alloc function.
 
1) If the flags parameter contains FRAME_ATOMIC, the allocator will not sleep.
The allocator CAN return NULL when memory is not immediately available.
The caller MUST check the return value for NULL.
 
2) If the flags parameter does not contain FRAME_ATOMIC, the allocator
will never return NULL, but it CAN sleep indefinitely. The caller
does not have to check the return value.
 
3) The maximum size that can be allocated using malloc is 128K.
 
Rules 1) and 2) apply to slab_alloc as well. Using the SLAB allocator
to allocate very large objects is not recommended.
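
A minimal usage sketch of both modes (illustrative sizes; kernel context
assumed):

  /* Rule 1: FRAME_ATOMIC may fail; the result MUST be checked. */
  char *buf = (char *) malloc(1024, FRAME_ATOMIC);
  if (!buf)
          panic("Out of memory.\n");  /* or recover gracefully */

  /* Rule 2: without FRAME_ATOMIC the call never returns NULL,
     but it may sleep indefinitely; no check is needed. */
  char *big = (char *) malloc(2048, 0);

  free(big);
  free(buf);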
 
/kernel/trunk/genarch/src/acpi/matd.c
144,7 → 144,9
}
 
/* create madt apic entries index array */
madt_entries_index = (struct madt_apic_header * *) malloc(madt_entries_index_cnt * sizeof(struct madt_apic_header * *));
madt_entries_index = (struct madt_apic_header * *) malloc(madt_entries_index_cnt * sizeof(struct madt_apic_header * *), FRAME_ATOMIC);
if (!madt_entries_index)
panic("Memory allocation error.");
 
__u32 index = 0;
 
/kernel/trunk/generic/include/mm/frame.h
53,8 → 53,10
#define FRAME_ERROR 2 /* frame_alloc return status */
 
/* Return true if the intervals overlap */
static inline int overlaps(__address s1,__address e1, __address s2, __address e2)
static inline int overlaps(__address s1,__address sz1, __address s2, __address sz2)
{
__address e1 = s1+sz1;
__address e2 = s2+sz2;
if (s1 >= s2 && s1 < e2)
return 1;
if (e1 >= s2 && e1 < e2)
95,12 → 97,13
__address frame_alloc_generic(__u8 order, int flags, int * status, int *pzone);
extern void frame_free(__address addr);
 
extern void zone_create(pfn_t start, count_t count, pfn_t confframe, int flags);
 
extern int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags);
void * frame_get_parent(pfn_t frame, int hint);
void frame_set_parent(pfn_t frame, void *data, int hint);
void frame_mark_unavailable(pfn_t start, count_t count);
__address zone_conf_size(pfn_t start, count_t count);
__address zone_conf_size(count_t count);
void zone_merge(int z1, int z2);
void zone_merge_all(void);
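
For illustration, the size-based overlaps() above changes call sites from
passing end addresses to passing sizes; a minimal sketch with hypothetical
variables:

/* Rev 821 style: overlaps(s1, s1 + sz1, s2, s2 + sz2) */
/* Rev 822 style: */
if (overlaps(base1, size1, base2, size2)) {
	/* the ranges [base1, base1+size1) and [base2, base2+size2) intersect */
}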
 
/*
* Console functions
/kernel/trunk/generic/include/mm/slab.h
125,8 → 125,6
extern void slab_print_list(void);
 
/* Malloc support */
extern void * kalloc(unsigned int size, int flags);
extern void kfree(void *obj);
#define malloc(x) kalloc(x, FRAME_ATOMIC)
#define free(x) kfree(x)
extern void * malloc(unsigned int size, int flags);
extern void free(void *obj);
#endif
/kernel/trunk/generic/include/mm/buddy.h
45,6 → 45,7
void (*mark_available)(buddy_system_t *, link_t *); /**< Mark block as available */
/** Find parent of block that has given order */
link_t *(* find_block)(buddy_system_t *, link_t *, __u8);
void (* print_id)(buddy_system_t *, link_t *);
};
 
struct buddy_system {
/kernel/trunk/generic/src/proc/task.c
64,19 → 64,19
ipl_t ipl;
task_t *ta;
ta = (task_t *) malloc(sizeof(task_t));
if (ta) {
spinlock_initialize(&ta->lock, "task_ta_lock");
list_initialize(&ta->th_head);
list_initialize(&ta->tasks_link);
ta->as = as;
ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
list_append(&ta->tasks_link, &tasks_head);
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
}
ta = (task_t *) malloc(sizeof(task_t), 0);
 
spinlock_initialize(&ta->lock, "task_ta_lock");
list_initialize(&ta->th_head);
list_initialize(&ta->tasks_link);
ta->as = as;
ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
list_append(&ta->tasks_link, &tasks_head);
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
 
return ta;
}
 
/kernel/trunk/generic/src/proc/thread.c
226,68 → 226,66
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
thread_t *t;
 
ipl_t ipl;
t = (thread_t *) slab_alloc(thread_slab, 0);
if (t) {
ipl_t ipl;
/* Not needed, but good for debugging */
memsetb((__address)t->kstack, THREAD_STACK_SIZE, 0);
 
ipl = interrupts_disable();
spinlock_lock(&tidlock);
t->tid = ++last_tid;
spinlock_unlock(&tidlock);
interrupts_restore(ipl);
/* Not needed, but good for debugging */
memsetb((__address)t->kstack, THREAD_STACK_SIZE, 0);
ipl = interrupts_disable();
spinlock_lock(&tidlock);
t->tid = ++last_tid;
spinlock_unlock(&tidlock);
interrupts_restore(ipl);
context_save(&t->saved_context);
context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);
the_initialize((the_t *) t->kstack);
ipl = interrupts_disable();
t->saved_context.ipl = interrupts_read();
interrupts_restore(ipl);
t->thread_code = func;
t->thread_arg = arg;
t->ticks = -1;
t->priority = -1; /* start in rq[0] */
t->cpu = NULL;
t->flags = 0;
t->state = Entering;
t->call_me = NULL;
t->call_me_with = NULL;
timeout_initialize(&t->sleep_timeout);
t->sleep_queue = NULL;
t->timeout_pending = 0;
t->rwlock_holder_type = RWLOCK_NONE;
context_save(&t->saved_context);
context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);
the_initialize((the_t *) t->kstack);
t->task = task;
t->fpu_context_exists=0;
t->fpu_context_engaged=0;
/*
* Register this thread in the system-wide list.
*/
ipl = interrupts_disable();
spinlock_lock(&threads_lock);
list_append(&t->threads_link, &threads_head);
spinlock_unlock(&threads_lock);
/*
* Attach to the containing task.
*/
spinlock_lock(&task->lock);
list_append(&t->th_link, &task->th_head);
spinlock_unlock(&task->lock);
interrupts_restore(ipl);
 
ipl = interrupts_disable();
t->saved_context.ipl = interrupts_read();
interrupts_restore(ipl);
t->thread_code = func;
t->thread_arg = arg;
t->ticks = -1;
t->priority = -1; /* start in rq[0] */
t->cpu = NULL;
t->flags = 0;
t->state = Entering;
t->call_me = NULL;
t->call_me_with = NULL;
timeout_initialize(&t->sleep_timeout);
t->sleep_queue = NULL;
t->timeout_pending = 0;
t->rwlock_holder_type = RWLOCK_NONE;
t->task = task;
t->fpu_context_exists=0;
t->fpu_context_engaged=0;
/*
* Register this thread in the system-wide list.
*/
ipl = interrupts_disable();
spinlock_lock(&threads_lock);
list_append(&t->threads_link, &threads_head);
spinlock_unlock(&threads_lock);
 
/*
* Attach to the containing task.
*/
spinlock_lock(&task->lock);
list_append(&t->th_link, &task->th_head);
spinlock_unlock(&task->lock);
 
interrupts_restore(ipl);
}
 
return t;
}
 
/kernel/trunk/generic/src/main/main.c
115,8 → 115,8
config.kernel_size = ALIGN_UP(hardcoded_ktext_size + hardcoded_kdata_size, PAGE_SIZE);
stackaddr = config.base + config.kernel_size;
/* Avoid placing kernel on top of init */
if (overlaps(stackaddr,stackaddr+CONFIG_STACK_SIZE,
config.init_addr, config.init_addr+config.init_size)) {
if (overlaps(stackaddr,CONFIG_STACK_SIZE,
config.init_addr, config.init_size)) {
stackaddr = ALIGN_UP(config.init_addr+config.init_size,
CONFIG_STACK_SIZE);
/kernel/trunk/generic/src/lib/sort.c
41,6 → 41,8
* This is only a wrapper that takes care of memory allocations for storing
* the pivot and temporary elements for generic quicksort algorithm.
*
* This function _can_ sleep
*
* @param data Pointer to data to be sorted.
* @param n Number of elements to be sorted.
* @param e_size Size of one element.
55,12 → 57,8
void * pivot = buf_pivot;
 
if (e_size > EBUFSIZE) {
pivot = (void *) malloc(e_size);
tmp = (void *) malloc(e_size);
if (!tmp || !pivot) {
panic("Cannot allocate memory\n");
}
pivot = (void *) malloc(e_size, 0);
tmp = (void *) malloc(e_size, 0);
}
 
_qsort(data, n, e_size, cmp, tmp, pivot);
126,11 → 124,7
void * slot = buf_slot;
if (e_size > EBUFSIZE) {
slot = (void *) malloc(e_size);
if (!slot) {
panic("Cannot allocate memory\n");
}
slot = (void *) malloc(e_size, 0);
}
 
_bubblesort(data, n, e_size, cmp, slot);
/kernel/trunk/generic/src/cpu/cpu.c
53,7 → 53,8
#ifdef CONFIG_SMP
if (config.cpu_active == 1) {
#endif /* CONFIG_SMP */
cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count);
cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count,
FRAME_ATOMIC);
if (!cpus)
panic("malloc/cpus");
 
/kernel/trunk/generic/src/adt/hash_table.c
51,7 → 51,7
ASSERT(op && op->hash && op->compare);
ASSERT(max_keys > 0);
h->entry = malloc(m * sizeof(link_t *));
h->entry = malloc(m * sizeof(link_t *), 0);
if (!h->entry) {
panic("cannot allocate memory for hash table\n");
}
/kernel/trunk/generic/src/mm/slab.c
542,7 → 542,7
ASSERT(_slab_initialized >= 2);
 
cache->mag_cache = kalloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
for (i=0; i < config.cpu_count; i++) {
memsetb((__address)&cache->mag_cache[i],
sizeof(cache->mag_cache[i]), 0);
705,7 → 705,7
panic("Destroying cache that is not empty.");
 
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
kfree(cache->mag_cache);
free(cache->mag_cache);
slab_free(&slab_cache_cache, cache);
}
 
871,7 → 871,7
 
/**************************************/
/* kalloc/kfree functions */
void * kalloc(unsigned int size, int flags)
void * malloc(unsigned int size, int flags)
{
int idx;
 
887,7 → 887,7
}
 
 
void kfree(void *obj)
void free(void *obj)
{
slab_t *slab;
 
/kernel/trunk/generic/src/mm/as.c
78,20 → 78,19
{
as_t *as;
 
as = (as_t *) malloc(sizeof(as_t));
if (as) {
list_initialize(&as->as_with_asid_link);
spinlock_initialize(&as->lock, "as_lock");
list_initialize(&as->as_area_head);
as = (as_t *) malloc(sizeof(as_t), 0);
 
if (flags & FLAG_AS_KERNEL)
as->asid = ASID_KERNEL;
else
as->asid = ASID_INVALID;
list_initialize(&as->as_with_asid_link);
spinlock_initialize(&as->lock, "as_lock");
list_initialize(&as->as_area_head);
if (flags & FLAG_AS_KERNEL)
as->asid = ASID_KERNEL;
else
as->asid = ASID_INVALID;
as->page_table = page_table_create(flags);
 
as->page_table = page_table_create(flags);
}
 
return as;
}
 
121,18 → 120,17
* TODO: check that the as_area being created does not overlap with an existing one.
*/
a = (as_area_t *) malloc(sizeof(as_area_t));
if (a) {
spinlock_initialize(&a->lock, "as_area_lock");
link_initialize(&a->link);
a->type = type;
a->size = size;
a->base = base;
list_append(&a->link, &as->as_area_head);
}
a = (as_area_t *) malloc(sizeof(as_area_t), 0);
 
spinlock_initialize(&a->lock, "as_area_lock");
link_initialize(&a->link);
a->type = type;
a->size = size;
a->base = base;
list_append(&a->link, &as->as_area_head);
 
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
 
/kernel/trunk/generic/src/mm/buddy.c
139,7 → 139,8
if (tmp == right) {
right = left;
left = tmp;
}
}
ASSERT(tmp == left);
b->op->mark_busy(b, left);
buddy_system_free(b, right);
b->op->mark_available(b, left);
290,8 → 291,15
cnt++;
}
printf("#%d\t%d\t%dK\t\t%dK\t\t%d\n", i, cnt, (cnt * (1 << i) * elem_size) >> 10, ((1 << i) * elem_size) >> 10, 1 << i);
printf("#%d\t%d\t%dK\t\t%dK\t\t%d\t", i, cnt, (cnt * (1 << i) * elem_size) >> 10, ((1 << i) * elem_size) >> 10, 1 << i);
if (!list_empty(&b->order[i])) {
for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next) {
b->op->print_id(b, cur);
printf(" ");
}
}
printf("\n");
block_count += cnt;
elem_count += (1 << i) * cnt;
}
/kernel/trunk/generic/src/mm/frame.c
50,6 → 50,7
#include <print.h>
#include <align.h>
#include <mm/slab.h>
#include <bitops.h>
 
typedef struct {
count_t refcount; /**< tracking of shared frames */
121,29 → 122,43
 
/**
* Insert-sort zone into zones list
*
* @return zone number on success, -1 on error
*/
static void zones_add_zone(zone_t *zone)
static int zones_add_zone(zone_t *newzone)
{
int i;
int i,j;
ipl_t ipl;
zone_t *z;
 
ipl = interrupts_disable();
spinlock_lock(&zones.lock);
/* Try to merge */
if (zone->flags & ZONE_JOIN) {
for (i=0; i < zones.count; i++) {
spinlock_lock(&zones.info[i]->lock);
/* Join forward, join backward */
panic("Not implemented");
if (zones.count+1 == ZONES_MAX)
panic("Maximum zone(%d) count exceeded.", ZONES_MAX);
 
spinlock_unlock(&zones.info[i]->lock);
for (i=0; i < zones.count; i++) {
/* Check for overlap */
z = zones.info[i];
if (overlaps(newzone->base,newzone->count,
z->base, z->count)) {
printf("Zones overlap!\n");
return -1;
}
spinlock_unlock(&zones.lock);
} else {
if (zones.count+1 == ZONES_MAX)
panic("Maximum zone(%d) count exceeded.", ZONES_MAX);
zones.info[zones.count++] = zone;
if (z->base < newzone->base)
break;
}
/* Move other zones up, from the top down */
for (j = zones.count; j > i; j--)
zones.info[j] = zones.info[j-1];
 
zones.info[i] = newzone;
zones.count++;
 
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
 
return i;
}
 
/**
186,6 → 201,12
return NULL;
}
 
/** @return True if zone can allocate specified order */
static int zone_can_alloc(zone_t *z, __u8 order)
{
return buddy_system_can_alloc(z->buddy_system, order);
}
 
/**
* Find AND LOCK zone that can allocate order frames
*
209,7 → 230,7
spinlock_lock(&z->lock);
 
/* Check if the zone has 2^order frames area available */
if (buddy_system_can_alloc(z->buddy_system, order)) {
if (zone_can_alloc(z, order)) {
spinlock_unlock(&zones.lock);
if (pzone)
*pzone = i;
252,8 → 273,18
return NULL;
}
 
static void zone_buddy_print_id(buddy_system_t *b, link_t *block)
{
frame_t * frame;
zone_t * zone;
index_t index;
 
frame = list_get_instance(block, frame_t, buddy_link);
zone = (zone_t *) b->data;
index = frame_index(zone, frame);
printf("%d", index);
}
 
/** Buddy system find_buddy implementation
*
* @param b Buddy system.
318,7 → 349,8
* @return Coalesced block (actually block that represents lower address)
*/
static link_t * zone_buddy_coalesce(buddy_system_t *b, link_t * block_1,
link_t * block_2) {
link_t * block_2)
{
frame_t *frame1, *frame2;
frame1 = list_get_instance(block_1, frame_t, buddy_link);
360,6 → 392,7
*/
static void zone_buddy_mark_busy(buddy_system_t *b, link_t * block) {
frame_t * frame;
 
frame = list_get_instance(block, frame_t, buddy_link);
frame->refcount = 1;
}
384,7 → 417,8
.get_order = zone_buddy_get_order,
.mark_busy = zone_buddy_mark_busy,
.mark_available = zone_buddy_mark_available,
.find_block = zone_buddy_find_block
.find_block = zone_buddy_find_block,
.print_id = zone_buddy_print_id
};
 
/*************************************/
393,10 → 427,11
/** Allocate frame in particular zone
*
* Assume zone is locked
* Panics if allocation is impossible.
*
* @return Frame index in zone
*/
static pfn_t zone_frame_alloc(zone_t *zone,__u8 order, int flags, int *status)
static pfn_t zone_frame_alloc(zone_t *zone,__u8 order)
{
pfn_t v;
link_t *tmp;
458,6 → 493,8
link_t *link;
 
frame = zone_get_frame(zone, frame_idx);
if (frame->refcount)
return;
link = buddy_system_alloc_block(zone->buddy_system,
&frame->buddy_link);
ASSERT(link);
464,6 → 501,196
zone->free_count--;
}
 
/**
* Join 2 zones
*
* Expect zone_t *z to point to space at least zone_conf_size large
*
* Assume z1 & z2 are locked
*/
 
static void _zone_merge(zone_t *z, zone_t *z1, zone_t *z2)
{
__u8 max_order;
int i, z2idx;
pfn_t frame_idx;
frame_t *frame;
 
ASSERT(!overlaps(z1->base,z1->count,z2->base,z2->count));
ASSERT(z1->base < z2->base);
 
spinlock_initialize(&z->lock, "zone_lock");
z->base = z1->base;
z->count = z2->base+z2->count - z1->base;
z->flags = z1->flags & z2->flags;
 
z->free_count = z1->free_count + z2->free_count;
z->busy_count = z1->busy_count + z2->busy_count;
max_order = fnzb(z->count);
 
z->buddy_system = (buddy_system_t *)&z[1];
buddy_system_create(z->buddy_system, max_order,
&zone_buddy_system_operations,
(void *) z);
 
z->frames = (frame_t *)((void *)z->buddy_system+buddy_conf_size(max_order));
for (i = 0; i < z->count; i++) {
/* This marks all frames busy */
frame_initialize(&z->frames[i]);
}
/* Copy frames from both zones to preserve full frame orders,
* parents etc. Set all frames with refcount=0 to 1, because
* we add all free frames to buddy allocator later again, clear
* order to 0.
*/
for (i=0; i<z1->count; i++)
z->frames[i] = z1->frames[i];
for (i=0; i < z2->count; i++) {
z2idx = i + (z2->base - z1->base);
z->frames[z2idx] = z2->frames[i];
}
for (i=0; i < z->count; i++) {
if (!z->frames[i].refcount) {
z->frames[i].refcount = 1;
z->frames[i].buddy_order = 0;
}
}
/* Add free blocks from the 2 original zones */
while (zone_can_alloc(z1, 0)) {
frame_idx = zone_frame_alloc(z1, 0);
frame = &z->frames[frame_idx];
frame->refcount = 0;
buddy_system_free(z->buddy_system, &frame->buddy_link);
}
while (zone_can_alloc(z2, 0)) {
frame_idx = zone_frame_alloc(z2, 0);
frame = &z->frames[frame_idx + (z2->base-z1->base)];
frame->refcount = 0;
buddy_system_free(z->buddy_system, &frame->buddy_link);
}
}
 
/** Return old configuration frames into the zone
*
* We have several cases:
* - the conf. data is outside the zone -> exit; shall we call frame_free?
* - the conf. data was created by zone_create -> free every frame
* - the conf. data was created by merge in frame_alloc -> free first frame
* (the difference is in order)
*/
static void return_config_frames(zone_t *newzone, zone_t *oldzone)
{
pfn_t pfn;
frame_t *frame;
count_t cframes;
int i;
 
pfn = ADDR2PFN((__address)KA2PA(oldzone));
cframes = SIZE2FRAMES(zone_conf_size(oldzone->count));
if (pfn < newzone->base || pfn >= newzone->base + newzone->count)
return;
 
frame = &newzone->frames[pfn - newzone->base];
if (frame->buddy_order) {
/* Normally zone config data is hidden, show it again */
newzone->busy_count += (1 << frame->buddy_order);
zone_frame_free(newzone, pfn - newzone->base);
return;
}
 
for (i=0; i < cframes; i++) {
newzone->busy_count++;
zone_frame_free(newzone, pfn+i-newzone->base);
}
}
 
/** Merge zones z1 and z2
*
* - the zones must be adjacent, with no zone existing in between,
* which means that z2 == z1 + 1
*
* - when you create a new zone, the frame allocator configuration does
* not need to be 2^order in size; once the allocator is running, that is
* no longer possible, because merged configuration data occupies more space :-/
*/
void zone_merge(int z1, int z2)
{
ipl_t ipl;
zone_t *zone1, *zone2, *newzone;
int cframes;
__u8 order;
int i;
pfn_t pfn;
 
ipl = interrupts_disable();
spinlock_lock(&zones.lock);
 
if (z1 < 0 || z1 >= zones.count || z2 < 0 || z2 >= zones.count)
goto errout;
/* We can join only 2 zones with none existing in between */
if (z2-z1 != 1)
goto errout;
 
zone1 = zones.info[z1];
zone2 = zones.info[z2];
spinlock_lock(&zone1->lock);
spinlock_lock(&zone2->lock);
 
cframes = SIZE2FRAMES(zone_conf_size(zone2->base+zone2->count-zone1->base));
order = fnzb(cframes) + 1;
 
/* Allocate zonedata inside one of the zones */
if (zone_can_alloc(zone1, order))
pfn = zone1->base + zone_frame_alloc(zone1, order);
else if (zone_can_alloc(zone2, order))
pfn = zone2->base + zone_frame_alloc(zone2, order);
else
goto errout2;
 
newzone = (zone_t *)PA2KA(PFN2ADDR(pfn));
 
_zone_merge(newzone, zone1, zone2);
 
/* Subtract zone information from busy frames */
newzone->busy_count -= (1 << order);
 
zones.info[z1] = newzone;
for (i=z2+1;i < zones.count;i++)
zones.info[i-1] = zones.info[i];
zones.count--;
 
/* Free old zone information */
return_config_frames(newzone, zone1);
return_config_frames(newzone, zone2);
errout2:
/* Nobody is allowed to enter the zone, so we are safe
* to touch the spinlocks one last time */
spinlock_unlock(&zone1->lock);
spinlock_unlock(&zone2->lock);
errout:
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
}
 
 
/**
* Merge all zones into one big zone
*
* It is reasonable to do this on systems whose BIOS reports the memory
* in several chunks, so that we can have one zone (it's faster).
*/
void zone_merge_all(void)
{
int count = zones.count;
 
while (zones.count > 1 && --count) {
zone_merge(0,1);
break;
}
}
 
/** Create frame zone
*
* Create new frame zone.
475,8 → 702,7
*
* @return Initialized zone.
*/
static zone_t * zone_construct(pfn_t start, count_t count,
zone_t *z, int flags)
static void zone_construct(pfn_t start, count_t count, zone_t *z, int flags)
{
int i;
__u8 max_order;
491,8 → 717,7
/*
* Compute order for buddy system, initialize
*/
for (max_order = 0; count >> max_order; max_order++)
;
max_order = fnzb(count);
z->buddy_system = (buddy_system_t *)&z[1];
buddy_system_create(z->buddy_system, max_order,
502,31 → 727,30
/* Allocate frames _after_ the conframe */
/* Check sizes */
z->frames = (frame_t *)((void *)z->buddy_system+buddy_conf_size(max_order));
 
for (i = 0; i<count; i++) {
frame_initialize(&z->frames[i]);
}
 
/* Stuffing frames */
for (i = 0; i < count; i++) {
z->frames[i].refcount = 0;
buddy_system_free(z->buddy_system, &z->frames[i].buddy_link);
}
return z;
}
 
 
/** Compute configuration data size for zone */
__address zone_conf_size(pfn_t start, count_t count)
__address zone_conf_size(count_t count)
{
int size = sizeof(zone_t) + count*sizeof(frame_t);
int max_order;
 
for (max_order = 0; count >> max_order; max_order++)
;
max_order = fnzb(count);
size += buddy_conf_size(max_order);
return size;
}
 
 
/** Create and add zone to system
*
* @param confframe Where configuration frame is supposed to be.
534,13 → 758,16
* If confframe is given _outside_ this zone, it is expected
* that the area is already marked BUSY and big enough
* to contain zone_conf_size() amount of data.
*
* @return Zone number or -1 on error
*/
void zone_create(pfn_t start, count_t count, pfn_t confframe, int flags)
int zone_create(pfn_t start, count_t count, pfn_t confframe, int flags)
{
zone_t *z;
__address addr,endaddr;
__address addr;
count_t confcount;
int i;
int znum;
 
/* Theoretically we could have 0 here; practically, make sure
* nobody tries to do that. If some platform requires, remove
550,18 → 777,17
/* If confframe is supposed to be inside our zone, then make sure
* it does not span kernel & init
*/
confcount = SIZE2FRAMES(zone_conf_size(start,count));
confcount = SIZE2FRAMES(zone_conf_size(count));
if (confframe >= start && confframe < start+count) {
for (;confframe < start+count;confframe++) {
addr = PFN2ADDR(confframe);
endaddr = PFN2ADDR (confframe + confcount);
if (overlaps(addr, endaddr, KA2PA(config.base),
KA2PA(config.base+config.kernel_size)))
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(config.base),config.kernel_size))
continue;
if (config.init_addr)
if (overlaps(addr,endaddr,
if (overlaps(addr,PFN2ADDR(confcount),
KA2PA(config.init_addr),
KA2PA(config.init_addr+config.init_size)))
config.init_size))
continue;
break;
}
569,14 → 795,19
panic("Cannot find configuration data for zone.");
}
 
z = zone_construct(start, count, (zone_t *)PA2KA(PFN2ADDR(confframe)), flags);
zones_add_zone(z);
z = (zone_t *)PA2KA(PFN2ADDR(confframe));
zone_construct(start, count, z, flags);
znum = zones_add_zone(z);
if (znum == -1)
return -1;
 
/* If confdata in zone, mark as unavailable */
if (confframe >= start && confframe < start+count)
for (i=confframe; i<confframe+confcount; i++) {
zone_mark_unavailable(z, i - z->base);
}
 
return znum;
}
 
/***************************************/
657,7 → 888,7
panic("Sleep not implemented.\n");
goto loop;
}
v = zone_frame_alloc(zone,order,flags,status);
v = zone_frame_alloc(zone,order);
v += zone->base;
 
spinlock_unlock(&zone->lock);
670,7 → 901,7
 
/** Free a frame.
*
* Find respective frame structrue for supplied addr.
* Find respective frame structure for supplied addr.
* Decrement frame reference count.
* If it drops to zero, move the frame structure to free list.
*
704,7 → 935,7
zone_t *zone;
int prefzone = 0;
 
for (i=0; i<count; i++) {
for (i=0; i < count; i++) {
zone = find_zone_and_lock(start+i,&prefzone);
if (!zone) /* PFN not found */
continue;
747,12 → 978,12
 
ipl = interrupts_disable();
spinlock_lock(&zones.lock);
printf("Base address\tFree Frames\tBusy Frames\n");
printf("------------\t-----------\t-----------\n");
printf("# Base address\tFree Frames\tBusy Frames\n");
printf(" ------------\t-----------\t-----------\n");
for (i=0;i<zones.count;i++) {
zone = zones.info[i];
spinlock_lock(&zone->lock);
printf("%L\t%d\t\t%d\n",PFN2ADDR(zone->base),
printf("%d %L\t%d\t\t%d\n",i,PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
spinlock_unlock(&zone->lock);
}
762,35 → 993,37
 
/** Prints zone details
*
* @param base Zone base address
* @param base Zone base address OR zone number
*/
void zone_print_one(int znum) {
void zone_print_one(int num) {
zone_t *zone = NULL;
ipl_t ipl;
int i;
 
ipl = interrupts_disable();
spinlock_lock(&zones.lock);
if (znum >= zones.count || znum < 0) {
printf("Zone number out of bounds.\n");
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
return;
 
for (i=0;i < zones.count; i++) {
if (i == num || zones.info[i]->base == ADDR2PFN(num)) {
zone = zones.info[i];
break;
}
}
if (!zone) {
printf("Zone not found.\n");
goto out;
}
zone = zones.info[znum];
spinlock_lock(&zone->lock);
printf("Memory zone information\n\n");
printf("Memory zone information\n");
printf("Zone base address: %P\n", PFN2ADDR(zone->base));
printf("Zone size: %d frames (%dK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %d frames (%dK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
printf("Available space: %d (%dK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
printf("\nBuddy allocator structures:\n\n");
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
spinlock_unlock(&zone->lock);
out:
spinlock_unlock(&zones.lock);
interrupts_restore(ipl);
}
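
Taken together, the reworked zone interface composes as in this sketch
(illustrative frame numbers; not taken from the source):

int z0, z1;

/* zone_create() now returns the zone number, or -1 on overlap */
z0 = zone_create(ADDR2PFN(0x0), SIZE2FRAMES(0x100000), ADDR2PFN(0x1000), 0);
z1 = zone_create(ADDR2PFN(0x100000), SIZE2FRAMES(0x100000), ADDR2PFN(0x101000), 0);

if ((z0 != -1) && (z1 != -1))
	zone_merge(z0, z1);	/* join two adjacent zones */

zone_merge_all();		/* or collapse all zones into one */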
/kernel/trunk/arch/amd64/src/pm.c
198,7 → 198,7
tss_p = &tss;
}
else {
tss_p = (struct tss *) malloc(sizeof(struct tss));
tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC);
if (!tss_p)
panic("could not allocate TSS\n");
}
/kernel/trunk/arch/ia32/src/pm.c
190,7 → 190,7
tss_p = &tss;
}
else {
tss_p = (struct tss *) malloc(sizeof(struct tss));
tss_p = (struct tss *) malloc(sizeof(struct tss),FRAME_ATOMIC);
if (!tss_p)
panic("could not allocate TSS\n");
}
/kernel/trunk/arch/ia32/src/smp/smp.c
138,7 → 138,7
/*
* Prepare new GDT for CPU in question.
*/
if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS*sizeof(struct descriptor))))
if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS*sizeof(struct descriptor), FRAME_ATOMIC)))
panic("couldn't allocate memory for GDT\n");
 
memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor));
/kernel/trunk/arch/ia32/src/mm/frame.c
131,5 → 131,7
frame_mark_unavailable(0xd000 >> FRAME_WIDTH,3);
#endif
#endif
/* Merge all zones to 1 big zone */
zone_merge_all();
}
}