/kernel/branches/falloc_bad/generic/src/mm/as.c |
---|
0,0 → 1,306 |
/* |
* Copyright (C) 2001-2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/* |
* This file contains address space manipulation functions. |
* Roughly speaking, this is a higher-level client of |
* Virtual Address Translation (VAT) subsystem. |
*/ |
#include <mm/as.h> |
#include <mm/page.h> |
#include <mm/frame.h> |
#include <mm/tlb.h> |
#include <mm/heap.h> |
#include <arch/mm/page.h> |
#include <genarch/mm/page_pt.h> |
#include <arch/mm/asid.h> |
#include <arch/mm/as.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <synch/spinlock.h> |
#include <config.h> |
#include <list.h> |
#include <panic.h> |
#include <arch/asm.h> |
#include <debug.h> |
#include <memstr.h> |
#include <arch.h> |
#include <print.h> |
#define KAS_START_INDEX PTL0_INDEX(KERNEL_ADDRESS_SPACE_START) |
#define KAS_END_INDEX PTL0_INDEX(KERNEL_ADDRESS_SPACE_END) |
#define KAS_INDICES (1+(KAS_END_INDEX-KAS_START_INDEX)) |
/* |
* Here we assume that PFN (Physical Frame Number) space |
* is smaller than the width of index_t. UNALLOCATED_PFN |
 * can then be used to mark mappings which were not
* yet allocated a physical frame. |
*/ |
#define UNALLOCATED_PFN ((index_t) -1) |
/** Create address space.
 *
 * Allocate and initialize an as_t. When @ptl0 is NULL, a fresh PTL0
 * frame is allocated and populated from the currently installed PTL0;
 * the physical address of that frame is then recorded in as->ptl0.
 *
 * @param ptl0 Physical address of PTL0 to adopt, or NULL to allocate one.
 *
 * @return New address space on success, NULL if malloc() failed.
 */
/*
 * FIXME: this interface must be meaningful for all possible VAT
 * (Virtual Address Translation) mechanisms.
 */
as_t *as_create(pte_t *ptl0)
{
	as_t *as;

	as = (as_t *) malloc(sizeof(as_t));
	if (as) {
		spinlock_initialize(&as->lock, "as_lock");
		list_initialize(&as->as_area_head);
		as->asid = asid_get();
		as->ptl0 = ptl0;
		if (!as->ptl0) {
			pte_t *src_ptl0, *dst_ptl0;

			/* Currently installed PTL0, viewed through the kernel address space. */
			src_ptl0 = (pte_t *) PA2KA((__address) GET_PTL0_ADDRESS());
			dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC, ONE_FRAME, NULL);

			/*
			 * NOTE(review): the whole PTL0 frame is copied, not only the
			 * kernel indices (the narrower variant remains commented out
			 * below). Any user mappings in the source PTL0 would be
			 * duplicated too -- presumably none exist here; confirm.
			 */
//			memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
//			memcpy((void *) &dst_ptl0[KAS_START_INDEX], (void *) &src_ptl0[KAS_START_INDEX], KAS_INDICES);
			memcpy((void *) dst_ptl0,(void *) src_ptl0, PAGE_SIZE);

			/* The hardware is given the physical address of PTL0. */
			as->ptl0 = (pte_t *) KA2PA((__address) dst_ptl0);
		}
	}

	return as;
}
/** Create address space area of common attributes. |
* |
* The created address space area is added to the target address space. |
* |
* @param as Target address space. |
* @param type Type of area. |
* @param size Size of area in multiples of PAGE_SIZE. |
* @param base Base address of area. |
* |
* @return Address space area on success or NULL on failure. |
*/ |
as_area_t *as_area_create(as_t *as, as_area_type_t type, size_t size, __address base) |
{ |
ipl_t ipl; |
as_area_t *a; |
if (base % PAGE_SIZE) |
panic("addr not aligned to a page boundary"); |
ipl = interrupts_disable(); |
spinlock_lock(&as->lock); |
/* |
* TODO: test as_area which is to be created doesn't overlap with an existing one. |
*/ |
a = (as_area_t *) malloc(sizeof(as_area_t)); |
if (a) { |
int i; |
a->mapping = (index_t *) malloc(size * sizeof(index_t)); |
if (!a->mapping) { |
free(a); |
spinlock_unlock(&as->lock); |
interrupts_restore(ipl); |
return NULL; |
} |
for (i=0; i<size; i++) { |
/* |
* Frames will be allocated on-demand by |
* as_page_fault() or preloaded by |
* as_area_set_mapping(). |
*/ |
a->mapping[i] = UNALLOCATED_PFN; |
} |
spinlock_initialize(&a->lock, "as_area_lock"); |
link_initialize(&a->link); |
a->type = type; |
a->size = size; |
a->base = base; |
list_append(&a->link, &as->as_area_head); |
} |
spinlock_unlock(&as->lock); |
interrupts_restore(ipl); |
return a; |
} |
/** Load mapping for address space area. |
* |
* Initialize a->mapping. |
* |
* @param a Target address space area. |
* @param vpn Page number relative to area start. |
* @param pfn Frame number to map. |
*/ |
void as_area_set_mapping(as_area_t *a, index_t vpn, index_t pfn) |
{ |
ASSERT(vpn < a->size); |
ASSERT(a->mapping[vpn] == UNALLOCATED_PFN); |
ASSERT(pfn != UNALLOCATED_PFN); |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&a->lock); |
a->mapping[vpn] = pfn; |
spinlock_unlock(&a->lock); |
interrupts_restore(ipl); |
} |
/** Handle page fault within the current address space.
 *
 * This is the high-level page fault handler. It walks the current
 * address space's area list, demand-allocates a zero-filled frame for
 * the faulting page if none was assigned yet, and inserts the mapping.
 * Interrupts are assumed disabled.
 *
 * @param page Faulting page.
 *
 * @return 0 on page fault (no area covers 'page'), 1 on success.
 */
int as_page_fault(__address page)
{
	int flags;
	link_t *cur;
	as_area_t *a, *area = NULL;
	index_t vpn;
	__address frame;
	
	ASSERT(AS);
	spinlock_lock(&AS->lock);
	
	/*
	 * Search the areas of this address space for presence of 'page'.
	 */
	for (cur = AS->as_area_head.next; cur != &AS->as_area_head; cur = cur->next) {
		a = list_get_instance(cur, as_area_t, link);
		spinlock_lock(&a->lock);

		if ((page >= a->base) && (page < a->base + a->size * PAGE_SIZE)) {

			/*
			 * We found the area containing 'page'.
			 * TODO: access checking
			 */
			
			vpn = (page - a->base) / PAGE_SIZE;
			area = a;
			/* NOTE: a->lock is deliberately kept held past the loop. */
			break;
		}
		
		spinlock_unlock(&a->lock);
	}
	
	if (!area) {
		/*
		 * No area contained mapping for 'page'.
		 * Signal page fault to low-level handler.
		 */
		spinlock_unlock(&AS->lock);
		return 0;
	}

	/*
	 * Note: area->lock is held.
	 */
	
	/*
	 * Decide if a frame needs to be allocated.
	 * If so, allocate it and adjust area->mapping map.
	 */
	if (area->mapping[vpn] == UNALLOCATED_PFN) {
		/* Demand-allocate a zero-filled frame and remember its PFN. */
		frame = frame_alloc(0, ONE_FRAME, NULL);
		memsetb(PA2KA(frame), FRAME_SIZE, 0);
		area->mapping[vpn] = frame / FRAME_SIZE;
		ASSERT(area->mapping[vpn] != UNALLOCATED_PFN);
	} else
		frame = area->mapping[vpn] * FRAME_SIZE;
	
	/* Derive page-table access flags from the area type. */
	switch (area->type) {
	case AS_AREA_TEXT:
		flags = PAGE_EXEC | PAGE_READ | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
		break;
	case AS_AREA_DATA:
	case AS_AREA_STACK:
		flags = PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
		break;
	default:
		panic("unexpected as_area_type_t %d", area->type);
	}
	
	/*
	 * Map 'page' to 'frame'.
	 * Note that TLB shootdown is not attempted as only new information is being
	 * inserted into page tables.
	 */
	page_mapping_insert(page, AS->asid, frame, flags, (__address) AS->ptl0);
	
	spinlock_unlock(&area->lock);
	spinlock_unlock(&AS->lock);
	
	return 1;
}
/** Install address space on CPU.
 *
 * Point the hardware at the address space's PTL0 and perform
 * architecture-specific activation steps (TLB invalidation, ASID
 * installation, ...).
 *
 * @param as Address space.
 */
void as_install(as_t *as)
{
	ipl_t ipl;
	
	ipl = interrupts_disable();
	spinlock_lock(&as->lock);
	ASSERT(as->ptl0);
	SET_PTL0_ADDRESS(as->ptl0);
	spinlock_unlock(&as->lock);
	interrupts_restore(ipl);

	/*
	 * Perform architecture-specific steps.
	 * (e.g. invalidate TLB, install ASID etc.)
	 */
	as_install_arch(as);
	
	/*
	 * NOTE(review): AS is updated after interrupts were restored;
	 * presumably the caller guarantees no preemption here -- confirm.
	 */
	AS = as;
}
/kernel/branches/falloc_bad/generic/src/mm/frame.c |
---|
0,0 → 1,560 |
/* |
* Copyright (C) 2001-2005 Jakub Jermar |
* Copyright (C) 2005 Sergey Bondari |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <typedefs.h> |
#include <arch/types.h> |
#include <mm/heap.h> |
#include <mm/frame.h> |
#include <mm/as.h> |
#include <panic.h> |
#include <debug.h> |
#include <list.h> |
#include <synch/spinlock.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <print.h> |
#include <align.h> |
SPINLOCK_INITIALIZE(zone_head_lock); /**< this lock protects zone_head list */ |
LIST_INITIALIZE(zone_head); /**< list of all zones in the system */ |
/** Blacklist containing non-available areas of memory. |
* |
* This blacklist is used to exclude frames that cannot be allocated |
* (e.g. kernel memory) from available memory map. |
*/ |
region_t zone_blacklist[ZONE_BLACKLIST_SIZE]; |
count_t zone_blacklist_count = 0; |
static struct buddy_system_operations zone_buddy_system_operations = { |
.find_buddy = zone_buddy_find_buddy, |
.bisect = zone_buddy_bisect, |
.coalesce = zone_buddy_coalesce, |
.set_order = zone_buddy_set_order, |
.get_order = zone_buddy_get_order, |
.mark_busy = zone_buddy_mark_busy, |
}; |
/** Initialize physical memory management |
* |
* Initialize physical memory managemnt. |
*/ |
void frame_init(void) |
{ |
if (config.cpu_active == 1) { |
frame_region_not_free(KA2PA(config.base), config.kernel_size); |
if (config.init_size > 0) |
frame_region_not_free(KA2PA(config.init_addr), config.init_size); |
} |
frame_arch_init(); |
} |
/** Allocate power-of-two frames of physical memory. |
* |
* @param flags Flags for host zone selection and address processing. |
* @param order Allocate exactly 2^order frames. |
* |
* @return Allocated frame. |
*/ |
__address frame_alloc(int flags, __u8 order, int * status) |
{ |
ipl_t ipl; |
link_t *cur, *tmp; |
zone_t *z; |
zone_t *zone = NULL; |
frame_t *frame = NULL; |
__address v; |
loop: |
ipl = interrupts_disable(); |
spinlock_lock(&zone_head_lock); |
/* |
* First, find suitable frame zone. |
*/ |
for (cur = zone_head.next; cur != &zone_head; cur = cur->next) { |
z = list_get_instance(cur, zone_t, link); |
spinlock_lock(&z->lock); |
/* Check if the zone has 2^order frames area available */ |
if (buddy_system_can_alloc(z->buddy_system, order)) { |
zone = z; |
break; |
} |
spinlock_unlock(&z->lock); |
} |
if (!zone) { |
if (flags & FRAME_PANIC) |
panic("Can't allocate frame.\n"); |
/* |
* TODO: Sleep until frames are available again. |
*/ |
spinlock_unlock(&zone_head_lock); |
interrupts_restore(ipl); |
if (flags & FRAME_NON_BLOCKING) { |
ASSERT(status != NULL); |
*status = FRAME_NO_MEMORY; |
return NULL; |
} |
panic("Sleep not implemented.\n"); |
goto loop; |
} |
/* Allocate frames from zone buddy system */ |
tmp = buddy_system_alloc(zone->buddy_system, order); |
ASSERT(tmp); |
/* Update zone information. */ |
zone->free_count -= (1 << order); |
zone->busy_count += (1 << order); |
/* Frame will be actually a first frame of the block. */ |
frame = list_get_instance(tmp, frame_t, buddy_link); |
/* get frame address */ |
v = FRAME2ADDR(zone, frame); |
spinlock_unlock(&zone->lock); |
spinlock_unlock(&zone_head_lock); |
interrupts_restore(ipl); |
if (flags & FRAME_KA) |
v = PA2KA(v); |
if (flags & FRAME_NON_BLOCKING) { |
ASSERT(status != NULL); |
*status = FRAME_OK; |
} |
return v; |
} |
/** Free a frame. |
* |
* Find respective frame structrue for supplied addr. |
* Decrement frame reference count. |
* If it drops to zero, move the frame structure to free list. |
* |
* @param addr Address of the frame to be freed. It must be a multiple of FRAME_SIZE. |
*/ |
void frame_free(__address addr) |
{ |
ipl_t ipl; |
link_t *cur; |
zone_t *z; |
zone_t *zone = NULL; |
frame_t *frame; |
ASSERT(addr % FRAME_SIZE == 0); |
ipl = interrupts_disable(); |
spinlock_lock(&zone_head_lock); |
/* |
* First, find host frame zone for addr. |
*/ |
for (cur = zone_head.next; cur != &zone_head; cur = cur->next) { |
z = list_get_instance(cur, zone_t, link); |
spinlock_lock(&z->lock); |
if (IS_KA(addr)) |
addr = KA2PA(addr); |
/* |
* Check if addr belongs to z. |
*/ |
if ((addr >= z->base) && (addr <= z->base + (z->free_count + z->busy_count) * FRAME_SIZE)) { |
zone = z; |
break; |
} |
spinlock_unlock(&z->lock); |
} |
ASSERT(zone != NULL); |
frame = ADDR2FRAME(zone, addr); |
ASSERT(frame->refcount); |
if (!--frame->refcount) { |
buddy_system_free(zone->buddy_system, &frame->buddy_link); |
} |
/* Update zone information. */ |
zone->free_count += (1 << frame->buddy_order); |
zone->busy_count -= (1 << frame->buddy_order); |
spinlock_unlock(&zone->lock); |
spinlock_unlock(&zone_head_lock); |
interrupts_restore(ipl); |
} |
/** Mark frame region not free. |
* |
* Mark frame region not free. |
* |
* @param base Base address of non-available region. |
* @param size Size of non-available region. |
*/ |
void frame_region_not_free(__address base, size_t size) |
{ |
index_t index; |
index = zone_blacklist_count++; |
/* Force base to the nearest lower address frame boundary. */ |
base = ALIGN_DOWN(base, FRAME_SIZE); |
/* Align size to frame boundary. */ |
size = ALIGN_UP(size, FRAME_SIZE); |
ASSERT(index < ZONE_BLACKLIST_SIZE); |
zone_blacklist[index].base = base; |
zone_blacklist[index].size = size; |
} |
/** Create frame zones in region of available memory.
 *
 * Avoid any black listed areas of non-available memory.
 * Assume that the black listed areas cannot overlap
 * one another or cross available memory region boundaries.
 *
 * Works recursively: if a blacklisted area starts inside
 * [base, base + size), the region is split into the parts below and
 * above that area and each part is processed again.
 *
 * @param base Base address of available memory region.
 * @param size Size of the region.
 */
void zone_create_in_region(__address base, size_t size) {
	int i;
	zone_t * z;
	__address s;
	size_t sz;
	
	ASSERT(base % FRAME_SIZE == 0);
	ASSERT(size % FRAME_SIZE == 0);
	
	if (!size)
		return;

	for (i = 0; i < zone_blacklist_count; i++) {
		if (zone_blacklist[i].base >= base && zone_blacklist[i].base < base + size) {
			/* Part of the region below the blacklisted area. */
			s = base; sz = zone_blacklist[i].base - base;
			/* Guard: the recursive call must differ from the current one. */
			ASSERT(base != s || sz != size);
			zone_create_in_region(s, sz);
			
			/* Part of the region above the blacklisted area. */
			s = zone_blacklist[i].base + zone_blacklist[i].size;
			sz = (base + size) - (zone_blacklist[i].base + zone_blacklist[i].size);
			ASSERT(base != s || sz != size);
			zone_create_in_region(s, sz);
			return;
		}
	}
	
	z = zone_create(base, size, 0);
	if (!z) {
		panic("Cannot allocate zone (base=%P, size=%d).\n", base, size);
	}
	
	zone_attach(z);
}
/** Create frame zone |
* |
* Create new frame zone. |
* |
* @param start Physical address of the first frame within the zone. |
* @param size Size of the zone. Must be a multiple of FRAME_SIZE. |
* @param flags Zone flags. |
* |
* @return Initialized zone. |
*/ |
zone_t * zone_create(__address start, size_t size, int flags) |
{ |
zone_t *z; |
count_t cnt; |
int i; |
__u8 max_order; |
ASSERT(start % FRAME_SIZE == 0); |
ASSERT(size % FRAME_SIZE == 0); |
cnt = size / FRAME_SIZE; |
z = (zone_t *) early_malloc(sizeof(zone_t)); |
if (z) { |
link_initialize(&z->link); |
spinlock_initialize(&z->lock, "zone_lock"); |
z->base = start; |
z->flags = flags; |
z->free_count = cnt; |
z->busy_count = 0; |
z->frames = (frame_t *) early_malloc(cnt * sizeof(frame_t)); |
if (!z->frames) { |
early_free(z); |
return NULL; |
} |
for (i = 0; i<cnt; i++) { |
frame_initialize(&z->frames[i], z); |
} |
/* |
* Create buddy system for the zone |
*/ |
for (max_order = 0; cnt >> max_order; max_order++) |
; |
z->buddy_system = buddy_system_create(max_order, &zone_buddy_system_operations, (void *) z); |
/* Stuffing frames */ |
for (i = 0; i<cnt; i++) { |
z->frames[i].refcount = 0; |
buddy_system_free(z->buddy_system, &z->frames[i].buddy_link); |
} |
} |
return z; |
} |
/** Attach frame zone |
* |
* Attach frame zone to zone list. |
* |
* @param zone Zone to be attached. |
*/ |
void zone_attach(zone_t *zone) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&zone_head_lock); |
list_append(&zone->link, &zone_head); |
spinlock_unlock(&zone_head_lock); |
interrupts_restore(ipl); |
} |
/** Initialize frame structure
 *
 * Initialize frame structure.
 *
 * @param frame Frame structure to be initialized.
 * @param zone Host frame zone (currently unused by this function).
 */
void frame_initialize(frame_t *frame, zone_t *zone)
{
	/* Start as referenced; zone_create() resets refcount to 0 before stuffing. */
	frame->refcount = 1;
	frame->buddy_order = 0;
}
/** Buddy system find_buddy implementation
 *
 * Locate the free buddy of 'block' within its zone, if one exists.
 *
 * @param b Buddy system.
 * @param block Block for which buddy should be found
 *
 * @return Buddy for given block if found
 */
link_t * zone_buddy_find_buddy(buddy_system_t *b, link_t * block) {
	frame_t * frame;
	zone_t * zone;
	index_t index;
	bool is_left, is_right;

	frame = list_get_instance(block, frame_t, buddy_link);
	zone = (zone_t *) b->data;
	ASSERT(IS_BUDDY_ORDER_OK(FRAME_INDEX(zone, frame), frame->buddy_order));
	
	/* A block is either the left or the right half of its parent, never both. */
	is_left = IS_BUDDY_LEFT_BLOCK(zone, frame);
	is_right = IS_BUDDY_RIGHT_BLOCK(zone, frame);
	ASSERT(is_left ^ is_right);
	
	/* The buddy lies 2^buddy_order frames away; direction depends on the side. */
	if (is_left) {
		index = (FRAME_INDEX(zone, frame)) + (1 << frame->buddy_order);
	} else {	// if (is_right)
		index = (FRAME_INDEX(zone, frame)) - (1 << frame->buddy_order);
	}
	
	if (FRAME_INDEX_VALID(zone, index)) {
		/* The buddy qualifies only if it is free and of the same order. */
		if (	zone->frames[index].buddy_order == frame->buddy_order &&
			zone->frames[index].refcount == 0) {
			return &zone->frames[index].buddy_link;
		}
	}
	
	return NULL;
}
/** Buddy system bisect implementation
 *
 * @param b Buddy system.
 * @param block Block to bisect
 *
 * @return right block
 */
link_t * zone_buddy_bisect(buddy_system_t *b, link_t * block) {
	frame_t * frame_l, * frame_r;

	frame_l = list_get_instance(block, frame_t, buddy_link);
	/*
	 * The right half starts 2^(buddy_order - 1) frames past the left
	 * half. NOTE(review): requires buddy_order >= 1; a zero-order
	 * block would shift by -1 (undefined behavior). buddy_system_alloc()
	 * only bisects blocks taken from order i + 1, so this holds there.
	 */
	frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));
	
	return &frame_r->buddy_link;
}
/** Buddy system coalesce implementation |
* |
* @param b Buddy system. |
* @param block_1 First block |
* @param block_2 First block's buddy |
* |
* @return Coalesced block (actually block that represents lower address) |
*/ |
link_t * zone_buddy_coalesce(buddy_system_t *b, link_t * block_1, link_t * block_2) { |
frame_t * frame1, * frame2; |
frame1 = list_get_instance(block_1, frame_t, buddy_link); |
frame2 = list_get_instance(block_2, frame_t, buddy_link); |
return frame1 < frame2 ? block_1 : block_2; |
} |
/** Buddy system set_order implementation |
* |
* @param b Buddy system. |
* @param block Buddy system block |
* @param order Order to set |
*/ |
void zone_buddy_set_order(buddy_system_t *b, link_t * block, __u8 order) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->buddy_order = order; |
} |
/** Buddy system get_order implementation |
* |
* @param b Buddy system. |
* @param block Buddy system block |
* |
* @return Order of block |
*/ |
__u8 zone_buddy_get_order(buddy_system_t *b, link_t * block) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
return frame->buddy_order; |
} |
/** Buddy system mark_busy implementation |
* |
* @param b Buddy system |
* @param block Buddy system block |
* |
*/ |
void zone_buddy_mark_busy(buddy_system_t *b, link_t * block) { |
frame_t * frame; |
frame = list_get_instance(block, frame_t, buddy_link); |
frame->refcount = 1; |
} |
/** Prints list of zones |
* |
*/ |
void zone_print_list(void) { |
zone_t *zone = NULL; |
link_t *cur; |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&zone_head_lock); |
printf("Base address\tFree Frames\tBusy Frames\n"); |
printf("------------\t-----------\t-----------\n"); |
for (cur = zone_head.next; cur != &zone_head; cur = cur->next) { |
zone = list_get_instance(cur, zone_t, link); |
spinlock_lock(&zone->lock); |
printf("%L\t%d\t\t%d\n",zone->base, zone->free_count, zone->busy_count); |
spinlock_unlock(&zone->lock); |
} |
spinlock_unlock(&zone_head_lock); |
interrupts_restore(ipl); |
} |
/** Prints zone details |
* |
* @param base Zone base address |
*/ |
void zone_print_one(__address base) { |
zone_t *zone = NULL, *z ; |
link_t *cur; |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&zone_head_lock); |
for (cur = zone_head.next; cur != &zone_head; cur = cur->next) { |
z = list_get_instance(cur, zone_t, link); |
if (base == z->base) { |
zone = z; |
break; |
} |
} |
if (!zone) { |
spinlock_unlock(&zone_head_lock); |
interrupts_restore(ipl); |
printf("No zone with address %X\n", base); |
return; |
} |
spinlock_lock(&zone->lock); |
printf("Memory zone information\n\n"); |
printf("Zone base address: %P\n", zone->base); |
printf("Zone size: %d frames (%dK)\n", zone->free_count + zone->busy_count, ((zone->free_count + zone->busy_count) * FRAME_SIZE) >> 10); |
printf("Allocated space: %d frames (%dK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10); |
printf("Available space: %d (%dK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10); |
printf("\nBuddy allocator structures:\n\n"); |
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE); |
spinlock_unlock(&zone->lock); |
spinlock_unlock(&zone_head_lock); |
interrupts_restore(ipl); |
} |
/kernel/branches/falloc_bad/generic/src/mm/page.c |
---|
0,0 → 1,110 |
/* |
* Copyright (C) 2001-2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/* |
* Virtual Address Translation subsystem. |
*/ |
#include <mm/page.h> |
#include <arch/mm/page.h> |
#include <arch/mm/asid.h> |
#include <mm/asid.h> |
#include <mm/frame.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <arch/asm.h> |
#include <memstr.h> |
#include <debug.h> |
#include <arch.h> |
/** Virtual operations for page subsystem. */
page_operations_t *page_operations = NULL;

/** Initialize the page subsystem.
 *
 * Perform architecture-specific initialization, then insert a
 * not-present mapping for virtual page 0 -- presumably so that
 * accesses to page 0 fault; confirm against the VAT mechanism.
 */
void page_init(void)
{
	page_arch_init();
	page_mapping_insert(0x0, 0, 0x0, PAGE_NOT_PRESENT, 0);
}
/** Map memory structure |
* |
* Identity-map memory structure |
* considering possible crossings |
* of page boundaries. |
* |
* @param s Address of the structure. |
* @param size Size of the structure. |
*/ |
void map_structure(__address s, size_t size) |
{ |
int i, cnt, length; |
length = size + (s - (s & ~(PAGE_SIZE-1))); |
cnt = length/PAGE_SIZE + (length%PAGE_SIZE>0); |
for (i = 0; i < cnt; i++) |
page_mapping_insert(s + i*PAGE_SIZE, ASID_KERNEL, s + i*PAGE_SIZE, PAGE_NOT_CACHEABLE, 0); |
} |
/** Map page to frame |
* |
* Map virtual address 'page' to physical address 'frame' |
* using 'flags'. Allocate and setup any missing page tables. |
* |
* @param page Virtual address of the page to be mapped. |
* @param asid Address space to wich page belongs. |
* @param frame Physical address of memory frame to which the mapping is done. |
* @param flags Flags to be used for mapping. |
* @param root Explicit PTL0 address. |
*/ |
void page_mapping_insert(__address page, asid_t asid, __address frame, int flags, __address root) |
{ |
ASSERT(page_operations); |
ASSERT(page_operations->mapping_insert); |
page_operations->mapping_insert(page, asid, frame, flags, root); |
} |
/** Find mapping for virtual page |
* |
* Find mapping for virtual page. |
* |
* @param page Virtual page. |
* @param asid Address space to wich page belongs. |
* @param root PTL0 address if non-zero. |
* |
* @return NULL if there is no such mapping; requested mapping otherwise. |
*/ |
pte_t *page_mapping_find(__address page, asid_t asid, __address root) |
{ |
ASSERT(page_operations); |
ASSERT(page_operations->mapping_find); |
return page_operations->mapping_find(page, asid, root); |
} |
/kernel/branches/falloc_bad/generic/src/mm/buddy.c |
---|
0,0 → 1,263 |
/* |
* Copyright (C) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/buddy.h> |
#include <mm/frame.h> |
#include <mm/heap.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <list.h> |
#include <debug.h> |
#include <print.h> |
/** Create buddy system
 *
 * Allocate memory for and initialize new buddy system.
 *
 * @param max_order The biggest allocable size will be 2^max_order.
 * @param op Operations for new buddy system.
 * @param data Pointer to be used by implementation.
 *
 * @return New buddy system, or NULL if an allocation failed.
 */
buddy_system_t *buddy_system_create(__u8 max_order, buddy_system_operations_t *op, void *data)
{
	buddy_system_t *b;
	int i;

	ASSERT(max_order < BUDDY_SYSTEM_INNER_BLOCK);

	/* All client-supplied operations are mandatory. */
	ASSERT(op->find_buddy);
	ASSERT(op->set_order);
	ASSERT(op->get_order);
	ASSERT(op->bisect);
	ASSERT(op->coalesce);
	ASSERT(op->mark_busy);

	/*
	 * Allocate memory for structure describing the whole buddy system.
	 */
	b = (buddy_system_t *) early_malloc(sizeof(buddy_system_t));
	
	if (b) {
		/*
		 * Allocate memory for all orders this buddy system will work with.
		 */
		b->order = (link_t *) early_malloc(max_order * sizeof(link_t));
		if (!b->order) {
			early_free(b);
			return NULL;
		}
		
		/* Each order gets its own, initially empty, free list. */
		for (i = 0; i < max_order; i++)
			list_initialize(&b->order[i]);
	
		b->max_order = max_order;
		b->op = op;
		b->data = data;
	}
	
	return b;
}
/** Check if buddy system can allocate block |
* |
* @param b Buddy system pointer |
* @param i Size of the block (2^i) |
* |
* @return True if block can be allocated |
*/ |
bool buddy_system_can_alloc(buddy_system_t *b, __u8 i) { |
__u8 k; |
ASSERT(i < b->max_order); |
for (k=i; k < b->max_order; k++) { |
if (!list_empty(&b->order[k])) { |
return true; |
} |
} |
return false; |
} |
/** Allocate block from buddy system.
 *
 * Satisfy the request from the free list of order i if possible,
 * otherwise recursively split a block of a higher order.
 *
 * @param b Buddy system pointer.
 * @param i Returned block will be 2^i big.
 *
 * @return Block of data represented by link_t, or NULL if no block
 *         of order >= i is available.
 */
link_t *buddy_system_alloc(buddy_system_t *b, __u8 i)
{
	link_t *res, *hlp;

	ASSERT(i < b->max_order);
	
	/*
	 * If the list of order i is not empty,
	 * the request can be immediatelly satisfied.
	 */
	if (!list_empty(&b->order[i])) {
		res = b->order[i].next;
		list_remove(res);
		b->op->mark_busy(b, res);
		return res;
	}
	
	/*
	 * If order i is already the maximal order,
	 * the request cannot be satisfied.
	 */
	if (i == b->max_order - 1)
		return NULL;
	
	/*
	 * Try to recursively satisfy the request from higher order lists.
	 */
	hlp = buddy_system_alloc(b, i + 1);
	
	/*
	 * The request could not be satisfied
	 * from higher order lists.
	 */
	if (!hlp)
		return NULL;
	
	res = hlp;
	
	/*
	 * Bisect the block and set order of both of its parts to i.
	 */
	hlp = b->op->bisect(b, res);
	b->op->set_order(b, res, i);
	b->op->set_order(b, hlp, i);
	
	/*
	 * Return the other half to buddy system.
	 * (Pre-existing author note kept verbatim:)
	 * PROBLEM!!!! FILL FIND OTHER PART AS BUDDY AND LINK TOGETHER
	 */
	b->op->mark_busy(b, res);
	buddy_system_free(b, hlp);
	
	return res;
}
/** Return block to buddy system.
 *
 * If the block's buddy is free, recursively coalesce the pair into a
 * block of the next higher order; otherwise append the block to the
 * free list of its order.
 *
 * @param b Buddy system pointer.
 * @param block Block to return.
 */
void buddy_system_free(buddy_system_t *b, link_t *block)
{
	link_t *buddy, *hlp;
	__u8 i;
	
	/*
	 * Determine block's order.
	 */
	i = b->op->get_order(b, block);

	ASSERT(i < b->max_order);

	if (i != b->max_order - 1) {
		/*
		 * See if there is any buddy in the list of order i.
		 */
		buddy = b->op->find_buddy(b, block);
		if (buddy) {

			ASSERT(b->op->get_order(b, buddy) == i);
		
			/*
			 * Remove buddy from the list of order i.
			 */
			list_remove(buddy);
		
			/*
			 * Invalidate order of both block and buddy.
			 */
			b->op->set_order(b, block, BUDDY_SYSTEM_INNER_BLOCK);
			b->op->set_order(b, buddy, BUDDY_SYSTEM_INNER_BLOCK);
		
			/*
			 * Coalesce block and buddy into one block.
			 */
			hlp = b->op->coalesce(b, block, buddy);

			/*
			 * Set order of the coalesced block to i + 1.
			 */
			b->op->set_order(b, hlp, i + 1);

			/*
			 * Recursively add the coalesced block to the list of order i + 1.
			 */
			buddy_system_free(b, hlp);
			return;
		}
	}

	/*
	 * Insert block into the list of order i.
	 */
	list_append(block, &b->order[i]);
}
/** Prints out structure of buddy system |
* |
* @param b Pointer to buddy system |
* @param es Element size |
*/ |
void buddy_system_structure_print(buddy_system_t *b, size_t elem_size) { |
index_t i; |
count_t cnt, elem_count = 0, block_count = 0; |
link_t * cur; |
printf("Order\tBlocks\tSize \tBlock size\tElems per block\n"); |
printf("-----\t------\t--------\t----------\t---------------\n"); |
for (i=0;i < b->max_order; i++) { |
cnt = 0; |
if (!list_empty(&b->order[i])) { |
for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next) |
cnt++; |
} |
printf("#%d\t%d\t%dK\t\t%dK\t\t%d\n", i, cnt, (cnt * (1 << i) * elem_size) >> 10, ((1 << i) * elem_size) >> 10, 1 << i); |
block_count += cnt; |
elem_count += (1 << i) * cnt; |
} |
printf("-----\t------\t--------\t----------\t---------------\n"); |
printf("Buddy system contains %d free elements (%d blocks)\n" , elem_count, block_count); |
} |
/kernel/branches/falloc_bad/generic/src/mm/tlb.c |
---|
0,0 → 1,83 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/tlb.h> |
#include <arch/mm/tlb.h> |
#include <smp/ipi.h> |
#include <synch/spinlock.h> |
#include <typedefs.h> |
#include <arch/atomic.h> |
#include <arch/interrupt.h> |
#include <config.h> |
#include <arch.h> |
#include <panic.h> |
/* Serializes TLB shootdown sequences; held by the initiating CPU from
 * tlb_shootdown_start() until tlb_shootdown_finalize(). */
SPINLOCK_INITIALIZE(tlblock);
/** Initialize the TLB management subsystem.
 *
 * Delegates to the architecture-specific TLB initialization.
 */
void tlb_init(void)
{
	tlb_arch_init();
}
#ifdef CONFIG_SMP |
/* must be called with interrupts disabled */ |
void tlb_shootdown_start(void) |
{ |
int i; |
CPU->tlb_active = 0; |
spinlock_lock(&tlblock); |
tlb_shootdown_ipi_send(); |
tlb_invalidate(0); /* TODO: use valid ASID */ |
busy_wait: |
for (i = 0; i<config.cpu_count; i++) |
if (cpus[i].tlb_active) |
goto busy_wait; |
} |
/** Finish a TLB shootdown sequence.
 *
 * Releasing tlblock lets the CPUs blocked in tlb_shootdown_ipi_recv()
 * proceed; this CPU then marks its own TLB as active again.
 * Pairs with tlb_shootdown_start().
 */
void tlb_shootdown_finalize(void)
{
	spinlock_unlock(&tlblock);
	CPU->tlb_active = 1;
}
/** Broadcast the TLB shootdown IPI to the other processors. */
void tlb_shootdown_ipi_send(void)
{
	ipi_broadcast(VECTOR_TLB_SHOOTDOWN_IPI);
}
/** Handler executed on a CPU that receives the TLB shootdown IPI.
 *
 * Clears tlb_active to tell the initiator this CPU has entered the
 * handler, then rendezvous with it: locking and immediately unlocking
 * tlblock blocks this CPU until the initiator — which holds tlblock for
 * the whole shootdown — calls tlb_shootdown_finalize(). Only then is the
 * local TLB invalidated and the CPU marked active again.
 */
void tlb_shootdown_ipi_recv(void)
{
	CPU->tlb_active = 0;
	spinlock_lock(&tlblock);
	spinlock_unlock(&tlblock);
	tlb_invalidate(0);	/* TODO: use valid ASID */
	CPU->tlb_active = 1;
}
#endif /* CONFIG_SMP */ |
/kernel/branches/falloc_bad/generic/src/mm/heap.c |
---|
0,0 → 1,157 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/heap.h> |
#include <synch/spinlock.h> |
#include <func.h> |
#include <memstr.h> |
#include <panic.h> |
#include <arch/types.h> |
#include <arch/asm.h> |
#include <arch.h> |
#include <align.h> |
/*
 * First-fit algorithm.
 * Simple, but hopefully correct.
 * Chunks being freed are tested for mergability with their neighbours.
 */

/* Head of the chunk list; established by early_heap_init(). */
static chunk_t *chunk0;

/* Protects the chunk list against concurrent early_malloc()/early_free(). */
SPINLOCK_INITIALIZE(heaplock);
/** Initialize the early heap.
 *
 * Zeroes the arena and turns all of it into one free chunk headed
 * by chunk0.
 *
 * @param heap Base address of the heap arena.
 * @param size Size of the arena in bytes.
 */
void early_heap_init(__address heap, size_t size)
{
	chunk_t *first = (chunk_t *) heap;

	memsetb(heap, size, 0);

	/* The whole arena is a single free chunk with no neighbours. */
	first->used = 0;
	first->size = size - sizeof(chunk_t);
	first->next = NULL;
	first->prev = NULL;

	chunk0 = first;
}
/*
 * Uses first-fit algorithm.
 */

/** Allocate memory from the early heap.
 *
 * Walks the chunk list starting at chunk0 and takes the first free chunk
 * large enough. If the chunk has room left over for another header plus
 * at least one byte of data, it is split and the remainder stays free.
 * Runs under heaplock with interrupts disabled.
 *
 * @param size Number of bytes requested; must be non-zero (panics
 *             otherwise). Rounded up to the native word size.
 *
 * @return Pointer to the allocated data area, or NULL if no free chunk
 *         is large enough.
 */
void *early_malloc(size_t size)
{
	ipl_t ipl;
	chunk_t *x, *y, *z;

	if (size == 0)
		panic("zero-size allocation request");

	/* Keep payloads word-aligned. */
	size = ALIGN_UP(size, sizeof(__native));

	x = chunk0;
	ipl = interrupts_disable();
	spinlock_lock(&heaplock);
	while (x) {
		/* Skip chunks that are in use or too small. */
		if (x->used || x->size < size) {
			x = x->next;
			continue;
		}

		x->used = 1;

		/*
		 * If the chunk exactly matches required size or if truncating
		 * it would not provide enough space for storing a new chunk
		 * header plus at least one byte of data, we are finished.
		 */
		if (x->size < size + sizeof(chunk_t) + 1) {
			spinlock_unlock(&heaplock);
			interrupts_restore(ipl);
			return &x->data[0];
		}

		/*
		 * Truncate x and create a new chunk y from its tail;
		 * link y between x and x's old successor z.
		 */
		y = (chunk_t *) (((__address) x) + size + sizeof(chunk_t));
		y->used = 0;
		y->size = x->size - size - sizeof(chunk_t);
		y->prev = x;
		y->next = NULL;

		z = x->next;
		if (z) {
			z->prev = y;
			y->next = z;
		}

		x->size = size;
		x->next = y;
		spinlock_unlock(&heaplock);
		interrupts_restore(ipl);
		return &x->data[0];
	}
	spinlock_unlock(&heaplock);
	interrupts_restore(ipl);
	return NULL;
}
/** Return a chunk allocated by early_malloc() to the early heap.
 *
 * The freed chunk is coalesced with its previous and/or next neighbour
 * when those are free. Runs under heaplock with interrupts disabled.
 *
 * @param ptr Pointer previously returned by early_malloc(); must not be
 *            NULL and must refer to a chunk marked used (panics
 *            otherwise).
 */
void early_free(void *ptr)
{
	ipl_t ipl;
	chunk_t *x, *y, *z;

	if (!ptr)
		panic("free on NULL");

	/* The chunk header sits immediately before the data area. */
	y = (chunk_t *) (((__u8 *) ptr) - sizeof(chunk_t));
	if (y->used != 1)
		panic("freeing unused/damaged chunk");

	ipl = interrupts_disable();
	spinlock_lock(&heaplock);

	x = y->prev;	/* previous neighbour */
	z = y->next;	/* next neighbour */

	/* merge x and y: absorb y (header included) into x */
	if (x && !x->used) {
		x->size += y->size + sizeof(chunk_t);
		x->next = z;
		if (z)
			z->prev = x;
		y = x;
	}

	/* merge y and z or merge (x merged with y) and z */
	if (z && !z->used) {
		y->size += z->size + sizeof(chunk_t);
		y->next = z->next;
		if (z->next) {
			/* y is either y or x */
			z->next->prev = y;
		}
	}

	/* Mark the (possibly coalesced) chunk free. */
	y->used = 0;

	spinlock_unlock(&heaplock);
	interrupts_restore(ipl);
}