Subversion Repositories HelenOS-historic

Compare Revisions

Rev 761 → Rev 762

/kernel/trunk/test/mm/falloc1/test.c
55,7 → 55,7
printf("Allocating %d frames blocks ... ", 1 << order);
allocated = 0;
for (i = 0; i < MAX_FRAMES >> order; i++) {
frames[allocated] = frame_alloc(FRAME_ATOMIC | FRAME_KA, order, &status);
frames[allocated] = frame_alloc(FRAME_ATOMIC | FRAME_KA, order, &status, NULL);
if (ALIGN_UP(frames[allocated], FRAME_SIZE << order) != frames[allocated]) {
panic("Test failed. Block at address %X (size %dK) is not aligned\n", frames[allocated], (FRAME_SIZE << order) >> 10);
/kernel/trunk/test/mm/falloc2/test.c
63,7 → 63,7
printf("Thread #%d: Allocating %d frames blocks ... \n", THREAD->tid, 1 << order);
allocated = 0;
for (i = 0; i < (MAX_FRAMES >> order); i++) {
frames[allocated] = frame_alloc(FRAME_ATOMIC | FRAME_KA, order, &status);
frames[allocated] = frame_alloc(FRAME_ATOMIC | FRAME_KA, order, &status, NULL);
if (status == 0) {
memsetb(frames[allocated], FRAME_SIZE << order, val);
allocated++;
/kernel/trunk/test/mm/slab1/test.c
28,8 → 28,37
 
#include <test.h>
#include <mm/slab.h>
#include <print.h>
 
#define VAL_SIZE 128
#define VAL_COUNT 1024
 
void * data[16384];
 
void test(void)
{
slab_cache_create("test_cache", 10, 0, NULL, NULL, 0);
slab_cache_t *cache;
int i;
 
printf("Creating cache.\n");
cache = slab_cache_create("test_cache", VAL_SIZE, 0, NULL, NULL, SLAB_CACHE_NOMAGAZINE);
slab_print_list();
printf("Destroying cache.\n");
slab_cache_destroy(cache);
 
printf("Creating cache.\n");
cache = slab_cache_create("test_cache", VAL_SIZE, 0, NULL, NULL,
SLAB_CACHE_NOMAGAZINE);
printf("Allocating %d items...", VAL_COUNT);
for (i=0; i < VAL_COUNT; i++) {
data[i] = slab_alloc(cache, 0);
}
printf("done.\n");
printf("Freeing %d items...", VAL_COUNT);
for (i=0; i < VAL_COUNT; i++) {
slab_free(cache, data[i]);
}
printf("done.\n");
}
/kernel/trunk/test/mm/mapping1/test.c
47,8 → 47,8
 
printf("Memory management test mapping #1\n");
 
frame0 = frame_alloc(FRAME_KA, ONE_FRAME, NULL);
frame1 = frame_alloc(FRAME_KA, ONE_FRAME, NULL);
frame0 = frame_alloc(FRAME_KA, ONE_FRAME, NULL, NULL);
frame1 = frame_alloc(FRAME_KA, ONE_FRAME, NULL, NULL);
 
printf("Writing %L to physical address %P.\n", VALUE0, KA2PA(frame0));
*((__u32 *) frame0) = VALUE0;
/kernel/trunk/genarch/src/mm/as_ht.c
53,7 → 53,7
pte_t *ht_create(int flags)
{
if (!page_ht) {
page_ht = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC, HT_WIDTH - FRAME_WIDTH, NULL);
page_ht = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC, HT_WIDTH - FRAME_WIDTH, NULL, NULL);
memsetb((__address) page_ht, HT_SIZE, 0);
}
return page_ht;
/kernel/trunk/genarch/src/mm/page_pt.c
65,7 → 65,7
ptl0 = (pte_t *) PA2KA((__address) as->page_table);
 
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = frame_alloc(FRAME_KA, ONE_FRAME, NULL);
newpt = frame_alloc(FRAME_KA, ONE_FRAME, NULL, NULL);
memsetb(newpt, PAGE_SIZE, 0);
SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
74,7 → 74,7
ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));
 
if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = frame_alloc(FRAME_KA, ONE_FRAME, NULL);
newpt = frame_alloc(FRAME_KA, ONE_FRAME, NULL, NULL);
memsetb(newpt, PAGE_SIZE, 0);
SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
83,7 → 83,7
ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));
 
if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = frame_alloc(FRAME_KA, ONE_FRAME, NULL);
newpt = frame_alloc(FRAME_KA, ONE_FRAME, NULL, NULL);
memsetb(newpt, PAGE_SIZE, 0);
SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
/kernel/trunk/genarch/src/mm/as_pt.c
56,7 → 56,7
pte_t *src_ptl0, *dst_ptl0;
ipl_t ipl;
 
dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC, ONE_FRAME, NULL);
dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC, ONE_FRAME, NULL, NULL);
 
if (flags & FLAG_AS_KERNEL) {
memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
/kernel/trunk/generic/include/mm/frame.h
39,9 → 39,10
 
#define ONE_FRAME 0
 
#define FRAME_KA 1 /* skip frames conflicting with user address space */
#define FRAME_PANIC 2 /* panic on failure */
#define FRAME_ATOMIC 4 /* do not panic and do not sleep on failure */
#define FRAME_KA 0x1 /* skip frames conflicting with user address space */
#define FRAME_PANIC 0x2 /* panic on failure */
#define FRAME_ATOMIC 0x4 /* do not panic and do not sleep on failure */
#define FRAME_NO_RECLAIM 0x8 /* Do not start reclaiming when no free memory */
 
#define FRAME_OK 0 /* frame_alloc return status */
#define FRAME_NO_MEMORY 1 /* frame_alloc return status */
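Editorial note (not part of the revision): combined with the extended frame_alloc() prototype introduced below, an allocation that must neither sleep nor start reclaiming can be sketched roughly as follows; the error handling shown is an assumption, not code from the source.

int status;
__address frame;

/* Sketch only: fail fast instead of sleeping or starting slab reclaim. */
frame = frame_alloc(FRAME_ATOMIC | FRAME_NO_RECLAIM | FRAME_KA, ONE_FRAME, &status, NULL);
if (status != FRAME_OK) {
	/* No free frames right now; the caller decides how to recover. */
}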
78,7 → 79,7
count_t refcount; /**< tracking of shared frames */
__u8 buddy_order; /**< buddy system block order */
link_t buddy_link; /**< link to the next free block inside one order */
slab_slab_t *slab; /**< If allocated by slab, this points there */
void *parent; /**< If allocated by slab, this points there */
};
 
struct region {
100,7 → 101,7
extern void frame_init(void);
extern void frame_initialize(frame_t *frame, zone_t *zone);
 
__address frame_alloc(int flags, __u8 order, int * status);
__address frame_alloc(int flags, __u8 order, int * status, zone_t **pzone);
extern void frame_free(__address addr);
 
zone_t * get_zone_by_frame(frame_t * frame);
114,6 → 115,7
void zone_buddy_set_order(buddy_system_t *b, link_t * block, __u8 order);
__u8 zone_buddy_get_order(buddy_system_t *b, link_t * block);
void zone_buddy_mark_busy(buddy_system_t *b, link_t * block);
extern frame_t * frame_addr2frame(__address addr);
 
/*
* TODO: Implement the following functions.
/kernel/trunk/generic/include/mm/slab.h
1,5 → 1,5
/*
* Copyright (C) 2005 Ondrej Palkovsky
* Copyright (C) 2006 Ondrej Palkovsky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
38,11 → 38,8
/** If object size is less, store control structure inside SLAB */
#define SLAB_INSIDE_SIZE (PAGE_SIZE / 6)
 
/* slab_alloc constants */
#define SLAB_ATOMIC 0x1 /**< Do not sleep when no free memory,
may return NULL */
#define SLAB_NO_RECLAIM 0x2 /**< Do not try to call slab_reclaim, if no
free memory is found - avoid deadlock */
/** Maximum wasted space we allow for cache */
#define SLAB_MAX_BADNESS(cache) ((PAGE_SIZE << (cache)->order) / 4)
 
/* slab_reclaim constants */
#define SLAB_RECLAIM_ALL 0x1 /**< Reclaim all possible memory, because
54,9 → 51,9
 
typedef struct {
link_t link;
count_t busy;
count_t size;
void *objs[0];
count_t busy; /**< Count of full slots in magazine */
count_t size; /**< Number of slots in magazine */
void *objs[0]; /**< Slots in magazine */
}slab_magazine_t;
 
typedef struct {
65,24 → 62,24
SPINLOCK_DECLARE(lock);
link_t link;
/* Configuration */
size_t size;
size_t align;
size_t size; /**< Size of SLAB position - align_up(sizeof(obj)) */
int (*constructor)(void *obj, int kmflag);
void (*destructor)(void *obj);
int flags;
int flags; /**< Flags changing behaviour of cache */
 
/* Computed values */
int pages;
int objects;
__u8 order; /**< Order of frames to be allocated */
int objects; /**< Number of objects that fit into one slab */
 
/* Statistics */
 
/* Slabs */
link_t full_slabs;
link_t partial_slabs;
link_t full_slabs; /**< List of full slabs */
link_t partial_slabs; /**< List of partial slabs */
/* Magazines */
link_t magazines;
/* CPU cache */
link_t magazines; /**< List of full magazines */
 
/** CPU cache */
struct {
slab_magazine_t *current;
slab_magazine_t *last;
90,30 → 87,22
}mag_cache[0];
}slab_cache_t;
 
typedef struct {
slab_cache_t *cache; /**< Pointer to parent cache */
void *start; /**< Start address of first available item */
count_t available; /**< Count of available items in this slab */
index_t nextavail; /**< The index of next available item */
}slab_slab_t;
extern slab_cache_t * slab_cache_create(char *name,
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
void (*destructor)(void *obj),
int flags);
extern void slab_cache_destroy(slab_cache_t *cache);
 
extern void * slab_alloc(slab_cache_t *cache, int flags);
extern void slab_free(slab_cache_t *cache, void *obj);
extern count_t slab_reclaim(int flags);
 
slab_cache_t * slab_cache_create(char *name,
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
void (*destructor)(void *obj),
int flags);
void slab_cache_destroy(slab_cache_t *cache);
 
void * slab_alloc(slab_cache_t *cache, int flags);
void slab_free(slab_cache_t *cache, void *obj);
count_t slab_reclaim(int flags);
 
/** Initialize SLAB subsystem */
void slab_cache_init(void);
extern void slab_cache_init(void);
 
/* KConsole debug */
void slab_print_list(void);
extern void slab_print_list(void);
 
#endif
/kernel/trunk/generic/src/proc/thread.c
173,9 → 173,9
spinlock_initialize(&t->lock, "thread_t_lock");
frame_ks = frame_alloc(FRAME_KA, ONE_FRAME, NULL);
frame_ks = frame_alloc(FRAME_KA, ONE_FRAME, NULL, NULL);
if (THREAD_USER_STACK & flags) {
frame_us = frame_alloc(FRAME_KA, ONE_FRAME, NULL);
frame_us = frame_alloc(FRAME_KA, ONE_FRAME, NULL,NULL);
}
 
ipl = interrupts_disable();
/kernel/trunk/generic/src/cpu/cpu.c
61,7 → 61,7
memsetb((__address) cpus, sizeof(cpu_t) * config.cpu_count, 0);
 
for (i=0; i < config.cpu_count; i++) {
cpus[i].stack = (__u8 *) frame_alloc(FRAME_KA | FRAME_PANIC, ONE_FRAME, NULL);
cpus[i].stack = (__u8 *) frame_alloc(FRAME_KA | FRAME_PANIC, ONE_FRAME, NULL, NULL);
cpus[i].id = i;
/kernel/trunk/generic/src/mm/slab.c
32,10 → 32,12
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
 
SPINLOCK_INITIALIZE(slab_cache_lock);
LIST_INITIALIZE(slab_cache_list);
42,29 → 44,170
 
slab_cache_t mag_cache;
 
 
typedef struct {
slab_cache_t *cache; /**< Pointer to parent cache */
link_t link; /* List of full/partial slabs */
void *start; /**< Start address of first available item */
count_t available; /**< Count of available items in this slab */
index_t nextavail; /**< The index of next available item */
}slab_t;
 
/**************************************/
/* SLAB low level functions */
/* SLAB allocation functions */
 
/**
* Allocate frames for slab space and initialize
*
* TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
*/
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
void *data;
slab_t *slab;
size_t fsize;
int i;
zone_t *zone = NULL;
int status;
 
data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
if (status != FRAME_OK)
return NULL;
 
if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
slab = malloc(sizeof(*slab)); // , flags);
if (!slab) {
frame_free((__address)data);
return NULL;
}
} else {
fsize = (PAGE_SIZE << cache->order);
slab = data + fsize - sizeof(*slab);
}
 
/* Fill in slab structures */
/* TODO: some better way of accessing the frame, although
* the optimizer might optimize the division out :-/ */
for (i=0; i< (1<<cache->order); i++) {
ADDR2FRAME(zone, (__address)(data+i*PAGE_SIZE))->parent = slab;
}
 
slab->start = data;
slab->available = cache->objects;
slab->nextavail = 0;
 
for (i=0; i<cache->objects;i++)
*((int *) (slab->start + i*cache->size)) = i+1;
return slab;
}
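Editorial sketch (not from the revision itself): the loop above seeds an in-place free list, where the first word of every free object stores the index of the next free object. slab_obj_create() below pops the head of that list in O(1); the essential steps are:

/* Sketch: consuming the embedded free list (see slab_obj_create() below). */
obj = slab->start + slab->nextavail * cache->size;	/* head of the free list */
slab->nextavail = *((int *) obj);			/* follow the embedded link */
slab->available--;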
 
/**
* Free space associated with SLAB
*
* @return number of freed frames
*/
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
frame_free((__address)slab->start);
if (!(cache->flags & SLAB_CACHE_SLINSIDE))
free(slab);
return 1 << cache->order;
}
 
/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
frame_t *frame;
 
frame = frame_addr2frame((__address)obj);
return (slab_t *)frame->parent;
}
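Editorial note: obj2slab() is the counterpart of the frame->parent assignment made in slab_space_alloc(); using only calls shown in this revision, the round trip looks roughly like this:

/* At slab creation time: every frame remembers its owning slab_t. */
ADDR2FRAME(zone, (__address) (data + i * PAGE_SIZE))->parent = slab;

/* At free time: any object address maps back to its slab_t. */
slab = (slab_t *) frame_addr2frame((__address) obj)->parent;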
 
/**************************************/
/* SLAB functions */
 
 
/**
* Return object to slab and call a destructor
*
* Assume the cache->lock is held;
*
* @param slab If the caller knows directly slab of the object, otherwise NULL
*
* @return Number of freed pages
*/
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj)
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
slab_t *slab)
{
return 0;
count_t frames = 0;
 
if (!slab)
slab = obj2slab(obj);
 
spinlock_lock(&cache->lock);
 
*((int *)obj) = slab->nextavail;
slab->nextavail = (obj - slab->start)/cache->size;
slab->available++;
 
/* Move it to correct list */
if (slab->available == 1) {
/* It was in full, move to partial */
list_remove(&slab->link);
list_prepend(&cache->partial_slabs, &slab->link);
}
if (slab->available == cache->objects) {
/* Free associated memory */
list_remove(&slab->link);
/* Avoid deadlock */
spinlock_unlock(&cache->lock);
frames = slab_space_free(cache, slab);
spinlock_lock(&cache->lock);
}
 
spinlock_unlock(&cache->lock);
 
return frames;
}
 
 
/**
* Take new object from slab or create new if needed
*
* Assume cache->lock is held.
*
* @return Object address or null
*/
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
return NULL;
slab_t *slab;
void *obj;
 
if (list_empty(&cache->partial_slabs)) {
/* Allow recursion and reclaiming
* - this should work, as the SLAB control structures
* are small and do not need to allocate with anything
* other than frame_alloc when they are allocating,
* that's why we should get recursion at most 1-level deep
*/
spinlock_unlock(&cache->lock);
slab = slab_space_alloc(cache, flags);
spinlock_lock(&cache->lock);
if (!slab)
return NULL;
} else {
slab = list_get_instance(cache->partial_slabs.next,
slab_t,
link);
list_remove(&slab->link);
}
obj = slab->start + slab->nextavail * cache->size;
slab->nextavail = *((int *)obj);
slab->available--;
if (! slab->available)
list_prepend(&cache->full_slabs, &slab->link);
else
list_prepend(&cache->partial_slabs, &slab->link);
return obj;
}
 
/**************************************/
73,7 → 216,7
/**
* Free all objects in magazine and free memory associated with magazine
*
* Assume cpu->lock is locked
* Assume mag_cache[cpu].lock is locked
*
* @return Number of freed pages
*/
84,7 → 227,7
count_t frames = 0;
 
for (i=0;i < mag->busy; i++)
frames += slab_obj_destroy(cache, mag->objs[i]);
frames += slab_obj_destroy(cache, mag->objs[i], NULL);
slab_free(&mag_cache, mag);
 
115,7 → 258,7
mag = cache->mag_cache[CPU->id].current;
goto gotit;
}
/* If still not busy, exchange current with some frome
/* If still not busy, exchange current with some from
* other full magazines */
spinlock_lock(&cache->lock);
if (list_empty(&cache->magazines)) {
161,7 → 304,7
/* We do not want to sleep just because of caching */
/* Especially we do not want reclaiming to start, as
* this would deadlock */
mag = slab_alloc(&mag_cache, SLAB_ATOMIC | SLAB_NO_RECLAIM);
mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
if (!mag) /* Allocation failed, give up on caching */
goto errout;
 
175,7 → 318,7
if (mag)
list_prepend(&cache->magazines, &mag->link);
 
mag = slab_alloc(&mag_cache, SLAB_ATOMIC | SLAB_NO_RECLAIM);
mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
if (!mag)
goto errout;
198,8 → 341,30
 
 
/**************************************/
/* Top level SLAB functions */
/* SLAB CACHE functions */
 
/** Return number of objects that fit in certain cache size */
static int comp_objects(slab_cache_t *cache)
{
if (cache->flags & SLAB_CACHE_SLINSIDE)
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
else
return (PAGE_SIZE << cache->order) / cache->size;
}
 
/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
int objects;
int ssize;
 
objects = comp_objects(cache);
ssize = PAGE_SIZE << cache->order;
if (cache->flags & SLAB_CACHE_SLINSIDE)
ssize -= sizeof(slab_t);
return ssize - objects*cache->size;
}
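To make the order selection in _slab_cache_create() below concrete, here is a hypothetical worked example (the numbers are illustrative, not taken from the source):

/* Assume PAGE_SIZE = 4096 and an outside-slab cache with size = 128:
 *   initial order = 128 / 4096 + 1     = 1
 *   objects       = 8192 / 128         = 64
 *   badness       = 8192 - 64 * 128    = 0
 *   SLAB_MAX_BADNESS(cache) = 8192 / 4 = 2048  -> badness is acceptable, order stays 1
 */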
 
/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
214,9 → 379,10
 
memsetb((__address)cache, sizeof(*cache), 0);
cache->name = name;
cache->align = align;
 
cache->size = ALIGN_UP(size, align);
if (align)
size = ALIGN_UP(size, align);
cache->size = size;
 
cache->constructor = constructor;
cache->destructor = destructor;
236,8 → 402,15
if (cache->size < SLAB_INSIDE_SIZE)
cache->flags |= SLAB_CACHE_SLINSIDE;
 
/* Minimum slab order */
cache->order = (cache->size / PAGE_SIZE) + 1;
while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
cache->order += 1;
}
 
cache->objects = comp_objects(cache);
 
spinlock_lock(&slab_cache_lock);
 
list_append(&cache->link, &slab_cache_list);
266,6 → 439,8
*
* @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
* @return Number of freed pages
*
* TODO: Add light reclaim
*/
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
283,6 → 458,8
spinlock_lock(&cache->lock);
if (flags & SLAB_RECLAIM_ALL) {
/* Aggressive memfree */
 
/* Destroy CPU magazines */
for (i=0; i<config.cpu_count; i++) {
mag = cache->mag_cache[i].current;
295,16 → 472,20
frames += magazine_destroy(cache, mag);
cache->mag_cache[i].last = NULL;
}
/* Destroy full magazines */
cur=cache->magazines.next;
while (cur!=&cache->magazines) {
mag = list_get_instance(cur, slab_magazine_t, link);
cur = cur->next;
list_remove(cur->prev);
frames += magazine_destroy(cache,mag);
}
}
/* Destroy full magazines */
cur=cache->magazines.prev;
while (cur!=&cache->magazines) {
mag = list_get_instance(cur, slab_magazine_t, link);
cur = cur->prev;
list_remove(cur->next);
frames += magazine_destroy(cache,mag);
/* If we do not do full reclaim, break
* as soon as something is freed */
if (!(flags & SLAB_RECLAIM_ALL) && frames)
break;
}
spinlock_unlock(&cache->lock);
for (i=0; i < config.cpu_count; i++)
347,8 → 528,11
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
result = magazine_obj_get(cache);
 
if (!result)
if (!result) {
spinlock_lock(&cache->lock);
result = slab_obj_create(cache, flags);
spinlock_unlock(&cache->lock);
}
 
interrupts_restore(ipl);
 
362,11 → 546,12
 
ipl = interrupts_disable();
 
if (cache->flags & SLAB_CACHE_NOMAGAZINE)
slab_obj_destroy(cache, obj);
else {
if (magazine_obj_put(cache, obj)) /* If magazine put failed */
slab_obj_destroy(cache, obj);
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
|| magazine_obj_put(cache, obj)) {
spinlock_lock(&cache->lock);
slab_obj_destroy(cache, obj, NULL);
spinlock_unlock(&cache->lock);
}
interrupts_restore(ipl);
}
398,10 → 583,10
link_t *cur;
 
spinlock_lock(&slab_cache_lock);
printf("SLAB name\tObj size\n");
printf("SLAB name\tOsize\tOrder\n");
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
cache = list_get_instance(cur, slab_cache_t, link);
printf("%s\t%d\n", cache->name, cache->size);
printf("%s\t%d\t%d\n", cache->name, cache->size, cache->order);
}
spinlock_unlock(&slab_cache_lock);
}
/kernel/trunk/generic/src/mm/as.c
253,7 → 253,7
* do not forget to distinguish between
* the different causes
*/
frame = frame_alloc(0, ONE_FRAME, NULL);
frame = frame_alloc(0, ONE_FRAME, NULL, NULL);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
/*
/kernel/trunk/generic/src/mm/frame.c
76,18 → 76,43
frame_arch_init();
}
 
/**
* Find AND LOCK zone that can allocate order frames
*
* Assume zone_head_lock is locked.
*/
static zone_t * find_free_zone(__u8 order)
{
link_t *cur;
zone_t *z;
 
for (cur = zone_head.next; cur != &zone_head;cur = cur->next) {
z = list_get_instance(cur, zone_t, link);
spinlock_lock(&z->lock);
/* Check if the zone has 2^order frames area available */
if (buddy_system_can_alloc(z->buddy_system, order))
return z;
spinlock_unlock(&z->lock);
}
return NULL;
}
 
/** Allocate power-of-two frames of physical memory.
*
* @param flags Flags for host zone selection and address processing.
* @param order Allocate exactly 2^order frames.
* @param pzone Pointer to a preferred zone pointer; on output it is set
* to the zone from which the frame was actually allocated
*
* @return Allocated frame.
*/
__address frame_alloc(int flags, __u8 order, int * status)
__address frame_alloc(int flags, __u8 order, int * status, zone_t **pzone)
{
ipl_t ipl;
link_t *cur, *tmp;
zone_t *z;
link_t *tmp;
zone_t *zone = NULL;
frame_t *frame = NULL;
__address v;
99,21 → 124,23
/*
* First, find suitable frame zone.
*/
for (cur = zone_head.next; cur != &zone_head; cur = cur->next) {
z = list_get_instance(cur, zone_t, link);
spinlock_lock(&z->lock);
 
/* Check if the zone has 2^order frames area available */
if (buddy_system_can_alloc(z->buddy_system, order)) {
zone = z;
break;
}
spinlock_unlock(&z->lock);
if (pzone && *pzone) {
spinlock_lock(&(*pzone)->lock);
if (!buddy_system_can_alloc((*pzone)->buddy_system, order))
spinlock_unlock(&(*pzone)->lock);
else
zone = *pzone;
}
if (!zone) {
zone = find_free_zone(order);
/* If no memory, reclaim some slab memory,
if it does not help, reclaim all */
if (!zone && !(flags & FRAME_NO_RECLAIM))
if (slab_reclaim(0) || slab_reclaim(SLAB_RECLAIM_ALL))
zone = find_free_zone(order);
}
 
if (!zone) {
if (flags & FRAME_PANIC)
panic("Can't allocate frame.\n");
161,9 → 188,70
ASSERT(status != NULL);
*status = FRAME_OK;
}
if (pzone)
*pzone = zone;
return v;
}
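Editorial sketch: the new pzone parameter lets callers such as slab_space_alloc() remember which zone satisfied the last request and pass it back in, skipping the zone search on repeated allocations. A minimal caller-side pattern, assuming only the declarations shown above, is:

zone_t *zone = NULL;	/* no preferred zone on the first call */
int status;
__address data;

data = frame_alloc(FRAME_KA, ONE_FRAME, &status, &zone);
/* On success, zone now points to the zone that provided the frame and can
 * be handed back on the next call to prefer the same zone. */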
 
/** Convert address to zone pointer
*
* Assume zone_head_lock is held
*
* @param addr Physical address
* @param lock If true, lock the zone
*/
static zone_t * addr2zone(__address addr, int lock)
{
link_t *cur;
zone_t *z = NULL;
 
for (cur = zone_head.next; cur != &zone_head; cur = cur->next) {
z = list_get_instance(cur, zone_t, link);
spinlock_lock(&z->lock);
/*
* Check if addr belongs to z.
*/
if ((addr >= z->base) && (addr <= z->base + (z->free_count + z->busy_count) * FRAME_SIZE)) {
if (!lock)
spinlock_unlock(&z->lock);
return z;
}
 
spinlock_unlock(&z->lock);
}
 
panic("Cannot find addr2zone: 0x%X", addr);
}
 
/** Return frame_t structure corresponding to address
*
*
*/
frame_t * frame_addr2frame(__address addr)
{
ipl_t ipl;
frame_t *frame;
zone_t *zone;
 
if (IS_KA(addr))
addr = KA2PA(addr);
 
/* Disable interrupts to avoid deadlocks with interrupt handlers */
ipl = interrupts_disable();
spinlock_lock(&zone_head_lock);
zone = addr2zone(addr,0);
frame = ADDR2FRAME(zone, addr);
 
spinlock_unlock(&zone_head_lock);
interrupts_restore(ipl);
 
return frame;
}
 
 
/** Free a frame.
*
* Find the respective frame structure for the supplied addr.
175,14 → 263,15
void frame_free(__address addr)
{
ipl_t ipl;
link_t *cur;
zone_t *z;
zone_t *zone = NULL;
zone_t *zone;
frame_t *frame;
int order;
ASSERT(addr % FRAME_SIZE == 0);
if (IS_KA(addr))
addr = KA2PA(addr);
 
ipl = interrupts_disable();
spinlock_lock(&zone_head_lock);
189,27 → 278,8
/*
* First, find host frame zone for addr.
*/
for (cur = zone_head.next; cur != &zone_head; cur = cur->next) {
z = list_get_instance(cur, zone_t, link);
spinlock_lock(&z->lock);
if (IS_KA(addr))
addr = KA2PA(addr);
/*
* Check if addr belongs to z.
*/
if ((addr >= z->base) && (addr <= z->base + (z->free_count + z->busy_count) * FRAME_SIZE)) {
zone = z;
break;
}
 
spinlock_unlock(&z->lock);
}
zone = addr2zone(addr, 1); /* This locks the zone automatically */
ASSERT(zone != NULL);
frame = ADDR2FRAME(zone, addr);
/* remember frame order */
/kernel/trunk/arch/ia64/src/mm/page.c
89,7 → 89,7
/*
* Allocate VHPT and invalidate all its entries.
*/
page_ht = (pte_t *) frame_alloc(FRAME_KA, VHPT_WIDTH - FRAME_WIDTH, NULL);
page_ht = (pte_t *) frame_alloc(FRAME_KA, VHPT_WIDTH - FRAME_WIDTH, NULL, NULL);
memsetb((__address) page_ht, VHPT_SIZE, 0);
ht_invalidate_all();