Subversion Repositories HelenOS-historic

Compare Revisions

Rev HEAD → Rev 368

/SPARTAN/trunk/src/mm/frame.c
0,0 → 1,364
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <typedefs.h>
#include <arch/types.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <mm/vm.h>
#include <panic.h>
#include <debug.h>
#include <list.h>
#include <synch/spinlock.h>
#include <arch/asm.h>
#include <arch.h>
 
spinlock_t zone_head_lock; /**< this lock protects zone_head list */
link_t zone_head; /**< list of all zones in the system */
 
/** Initialize physical memory management
*
* Initialize physical memory management.
*/
void frame_init(void)
{
	if (config.cpu_active == 1) {
		zone_init();
	}

	frame_arch_init();
	if (config.cpu_active == 1) {
		frame_region_not_free(config.base, config.base + config.kernel_size + CONFIG_STACK_SIZE);
	}
}
 
/** Allocate a frame
*
* Allocate a frame of physical memory.
*
* @param flags Flags for host zone selection and address processing.
*
* @return Address of the allocated frame (physical, or kernel virtual if FRAME_KA was set).
*/
__address frame_alloc(int flags)
{
	pri_t pri;
	link_t *cur, *tmp;
	zone_t *z;
	zone_t *zone = NULL;
	frame_t *frame = NULL;
	__address v;

loop:
	pri = cpu_priority_high();
	spinlock_lock(&zone_head_lock);

	/*
	 * First, find suitable frame zone.
	 */
	for (cur = zone_head.next; cur != &zone_head; cur = cur->next) {
		z = list_get_instance(cur, zone_t, link);
		spinlock_lock(&z->lock);
		/*
		 * Check if the zone has any free frames.
		 */
		if (z->free_count) {
			zone = z;
			break;
		}
		spinlock_unlock(&z->lock);
	}

	if (!zone) {
		if (flags & FRAME_PANIC)
			panic("Can't allocate frame.\n");

		/*
		 * TODO: Sleep until frames are available again.
		 */
		spinlock_unlock(&zone_head_lock);
		cpu_priority_restore(pri);

		panic("Sleep not implemented.\n");
		goto loop;
	}

	tmp = zone->free_head.next;
	frame = list_get_instance(tmp, frame_t, link);

	frame->refcount++;
	list_remove(tmp);			/* remove frame from free_head */
	list_append(tmp, &zone->busy_head);	/* append frame to busy_head */
	zone->free_count--;
	zone->busy_count++;

	v = zone->base + (frame - zone->frames) * FRAME_SIZE;
	if (flags & FRAME_KA)
		v = PA2KA(v);

	spinlock_unlock(&zone->lock);
	spinlock_unlock(&zone_head_lock);
	cpu_priority_restore(pri);

	return v;
}
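
/*
 * Illustrative usage sketch, not part of the original revision: the helper
 * below is hypothetical and only demonstrates the calling convention.
 * FRAME_KA asks for a kernel virtual address (PA2KA applied to the result);
 * FRAME_PANIC turns allocation failure into a panic instead of a return.
 */
static __address example_alloc_kernel_frame(void)
{
	/* Allocate one frame, mapped into the kernel address space. */
	return frame_alloc(FRAME_KA | FRAME_PANIC);
}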
 
/** Free a frame.
*
* Find the respective frame structure for the supplied addr.
* Decrement frame reference count.
* If it drops to zero, move the frame structure to free list.
*
* @param addr Address of the frame to be freed. It must be a multiple of FRAME_SIZE.
*/
void frame_free(__address addr)
{
	pri_t pri;
	link_t *cur;
	zone_t *z;
	zone_t *zone = NULL;
	frame_t *frame;

	ASSERT(addr % FRAME_SIZE == 0);

	pri = cpu_priority_high();
	spinlock_lock(&zone_head_lock);

	/*
	 * First, find host frame zone for addr.
	 */
	for (cur = zone_head.next; cur != &zone_head; cur = cur->next) {
		z = list_get_instance(cur, zone_t, link);
		spinlock_lock(&z->lock);

		if (IS_KA(addr))
			addr = KA2PA(addr);

		/*
		 * Check if addr belongs to z.
		 */
		if ((addr >= z->base) && (addr < z->base + (z->free_count + z->busy_count) * FRAME_SIZE)) {
			zone = z;
			break;
		}
		spinlock_unlock(&z->lock);
	}

	ASSERT(zone != NULL);

	frame = &zone->frames[(addr - zone->base) / FRAME_SIZE];
	ASSERT(frame->refcount);

	if (!--frame->refcount) {
		list_remove(&frame->link);			/* remove frame from busy_head */
		list_append(&frame->link, &zone->free_head);	/* append frame to free_head */
		zone->free_count++;
		zone->busy_count--;
	}

	spinlock_unlock(&zone->lock);
	spinlock_unlock(&zone_head_lock);
	cpu_priority_restore(pri);
}
 
/** Mark frame not free.
*
* Find the respective frame structure for the supplied addr.
* Increment frame reference count and move the frame structure to busy list.
*
* @param addr Address of the frame to be marked. It must be a multiple of FRAME_SIZE.
*/
void frame_not_free(__address addr)
{
	pri_t pri;
	link_t *cur;
	zone_t *z;
	zone_t *zone = NULL;
	frame_t *frame;

	ASSERT(addr % FRAME_SIZE == 0);

	pri = cpu_priority_high();
	spinlock_lock(&zone_head_lock);

	/*
	 * First, find host frame zone for addr.
	 */
	for (cur = zone_head.next; cur != &zone_head; cur = cur->next) {
		z = list_get_instance(cur, zone_t, link);
		spinlock_lock(&z->lock);

		if (IS_KA(addr))
			addr = KA2PA(addr);

		/*
		 * Check if addr belongs to z.
		 */
		if ((addr >= z->base) && (addr < z->base + (z->free_count + z->busy_count) * FRAME_SIZE)) {
			zone = z;
			break;
		}
		spinlock_unlock(&z->lock);
	}

	ASSERT(zone != NULL);

	frame = &zone->frames[(addr - zone->base) / FRAME_SIZE];

	if (!frame->refcount) {
		frame->refcount++;

		list_remove(&frame->link);			/* remove frame from free_head */
		list_append(&frame->link, &zone->busy_head);	/* append frame to busy_head */
		zone->free_count--;
		zone->busy_count++;
	}

	spinlock_unlock(&zone->lock);
	spinlock_unlock(&zone_head_lock);
	cpu_priority_restore(pri);
}
 
/** Mark frame region not free.
*
* Mark frame region not free.
*
* @param start First address of the region.
* @param stop Last address of the region (inclusive).
*/
void frame_region_not_free(__address start, __address stop)
{
	__address a;

	start /= FRAME_SIZE;
	stop /= FRAME_SIZE;

	for (a = start; a <= stop; a++)
		frame_not_free(a * FRAME_SIZE);
}
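
/*
 * Worked example (illustrative): with FRAME_SIZE == 4096, a call
 * frame_region_not_free(0x1800, 0x3000) divides both bounds down to frame
 * numbers 1 and 3 and marks the frames at 0x1000, 0x2000 and 0x3000, i.e.
 * the range is rounded down at the start and is inclusive at the end.
 */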
 
 
/** Initialize zonekeeping
*
* Initialize zonekeeping.
*/
void zone_init(void)
{
	spinlock_initialize(&zone_head_lock);
	list_initialize(&zone_head);
}
 
/** Create frame zone
*
* Create new frame zone.
*
* @param start Physical address of the first frame within the zone.
* @param size Size of the zone. Must be a multiple of FRAME_SIZE.
* @param flags Zone flags.
*
* @return Pointer to the initialized zone on success, NULL on failure.
*/
zone_t *zone_create(__address start, size_t size, int flags)
{
	zone_t *z;
	count_t cnt;
	int i;

	ASSERT(start % FRAME_SIZE == 0);
	ASSERT(size % FRAME_SIZE == 0);

	cnt = size / FRAME_SIZE;

	z = (zone_t *) malloc(sizeof(zone_t));
	if (z) {
		link_initialize(&z->link);
		spinlock_initialize(&z->lock);

		z->base = start;
		z->flags = flags;

		z->free_count = cnt;
		list_initialize(&z->free_head);

		z->busy_count = 0;
		list_initialize(&z->busy_head);

		z->frames = (frame_t *) malloc(cnt * sizeof(frame_t));
		if (!z->frames) {
			free(z);
			return NULL;
		}

		for (i = 0; i < cnt; i++) {
			frame_initialize(&z->frames[i], z);
			list_append(&z->frames[i].link, &z->free_head);
		}
	}

	return z;
}
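
/*
 * Illustrative sketch, not from this revision: frame_arch_init() is expected
 * to describe available physical memory by creating zones and attaching them
 * to the global list. The helper and its arguments are hypothetical; it
 * assumes the declarations from mm/frame.h are in scope.
 */
static void example_register_memory(__address base, size_t bytes)
{
	zone_t *z;

	/* base and bytes must both be multiples of FRAME_SIZE */
	z = zone_create(base, bytes, 0);
	if (!z)
		panic("cannot create zone");
	zone_attach(z);
}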
 
/** Attach frame zone
*
* Attach frame zone to zone list.
*
* @param zone Zone to be attached.
*/
void zone_attach(zone_t *zone)
{
	pri_t pri;

	pri = cpu_priority_high();
	spinlock_lock(&zone_head_lock);

	list_append(&zone->link, &zone_head);

	spinlock_unlock(&zone_head_lock);
	cpu_priority_restore(pri);
}
 
/** Initialize frame structure
*
* Initialize frame structure.
*
* @param frame Frame structure to be initialized.
* @param zone Host frame zone.
*/
void frame_initialize(frame_t *frame, zone_t *zone)
{
	frame->refcount = 0;
	link_initialize(&frame->link);
}
/SPARTAN/trunk/src/mm/vm.c
0,0 → 1,202
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <mm/vm.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/tlb.h>
#include <mm/heap.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <config.h>
#include <list.h>
#include <panic.h>
#include <arch/asm.h>
#include <debug.h>
#include <memstr.h>
#include <arch.h>
 
#define KAS_START_INDEX PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)
#define KAS_END_INDEX PTL0_INDEX(KERNEL_ADDRESS_SPACE_END)
#define KAS_INDICES (1+(KAS_END_INDEX-KAS_START_INDEX))
 
vm_t *vm_create(pte_t *ptl0)
{
	vm_t *m;

	m = (vm_t *) malloc(sizeof(vm_t));
	if (m) {
		spinlock_initialize(&m->lock);
		list_initialize(&m->vm_area_head);

		/*
		 * Each vm_t is supposed to have its own page table.
		 * It is either passed one or it has to allocate and set one up.
		 */
		m->ptl0 = ptl0;
		if (!m->ptl0) {
			pte_t *src_ptl0, *dst_ptl0;

			src_ptl0 = (pte_t *) PA2KA((__address) GET_PTL0_ADDRESS());
			dst_ptl0 = (pte_t *) frame_alloc(FRAME_KA | FRAME_PANIC);

			// memsetb((__address) dst_ptl0, PAGE_SIZE, 0);
			// memcpy((void *) &dst_ptl0[KAS_START_INDEX], (void *) &src_ptl0[KAS_START_INDEX], KAS_INDICES);
			memcpy((void *) dst_ptl0, (void *) src_ptl0, PAGE_SIZE);

			m->ptl0 = (pte_t *) KA2PA((__address) dst_ptl0);
		}
	}

	return m;
}
 
void vm_destroy(vm_t *m)
{
}
 
vm_area_t *vm_area_create(vm_t *m, vm_type_t type, size_t size, __address addr)
{
	pri_t pri;
	vm_area_t *a;

	if (addr % PAGE_SIZE)
		panic("addr not aligned to a page boundary");

	pri = cpu_priority_high();
	spinlock_lock(&m->lock);

	/*
	 * TODO: test that the vm_area being created does not overlap with an existing one.
	 */

	a = (vm_area_t *) malloc(sizeof(vm_area_t));
	if (a) {
		int i;

		a->mapping = (__address *) malloc(size * sizeof(__address));
		if (!a->mapping) {
			free(a);
			spinlock_unlock(&m->lock);
			cpu_priority_restore(pri);
			return NULL;
		}

		for (i = 0; i < size; i++)
			a->mapping[i] = frame_alloc(0);

		spinlock_initialize(&a->lock);
		link_initialize(&a->link);
		a->type = type;
		a->size = size;
		a->address = addr;

		list_append(&a->link, &m->vm_area_head);
	}

	spinlock_unlock(&m->lock);
	cpu_priority_restore(pri);

	return a;
}
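
/*
 * Illustrative sketch, not part of the original revision: creating an address
 * space with a freshly cloned PTL0, backing a one-page user stack with
 * anonymous frames and installing the mappings. USTACK_ADDRESS is an assumed,
 * hypothetical constant naming the stack's page-aligned virtual address.
 */
static void example_setup_address_space(void)
{
	vm_t *m;
	vm_area_t *a;

	m = vm_create(NULL);	/* NULL: allocate and copy a new PTL0 */
	if (!m)
		panic("vm_create failed");

	a = vm_area_create(m, VMA_STACK, 1, USTACK_ADDRESS);
	if (!a)
		panic("vm_area_create failed");

	vm_area_map(a, m);	/* enter the area's frames into m->ptl0 */
}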
 
void vm_area_destroy(vm_area_t *a)
{
}
 
void vm_area_map(vm_area_t *a, vm_t *m)
{
	int i, flags;
	pri_t pri;

	pri = cpu_priority_high();
	spinlock_lock(&m->lock);
	spinlock_lock(&a->lock);

	switch (a->type) {
	case VMA_TEXT:
		flags = PAGE_EXEC | PAGE_READ | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
		break;
	case VMA_DATA:
	case VMA_STACK:
		flags = PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
		break;
	default:
		panic("unexpected vm_type_t %d", a->type);
	}

	ASSERT(m->ptl0);
	for (i = 0; i < a->size; i++)
		map_page_to_frame(a->address + i * PAGE_SIZE, a->mapping[i], flags, (__address) m->ptl0);

	spinlock_unlock(&a->lock);
	spinlock_unlock(&m->lock);
	cpu_priority_restore(pri);
}
 
void vm_area_unmap(vm_area_t *a, vm_t *m)
{
	int i;
	pri_t pri;

	pri = cpu_priority_high();
	spinlock_lock(&m->lock);
	spinlock_lock(&a->lock);

	ASSERT(m->ptl0);
	for (i = 0; i < a->size; i++)
		map_page_to_frame(a->address + i * PAGE_SIZE, 0, PAGE_NOT_PRESENT, (__address) m->ptl0);

	spinlock_unlock(&a->lock);
	spinlock_unlock(&m->lock);
	cpu_priority_restore(pri);
}
 
void vm_install(vm_t *m)
{
	pri_t pri;

	pri = cpu_priority_high();

	tlb_shootdown_start();
	spinlock_lock(&m->lock);

	ASSERT(m->ptl0);
	SET_PTL0_ADDRESS(m->ptl0);

	spinlock_unlock(&m->lock);
	tlb_shootdown_finalize();

	cpu_priority_restore(pri);
}
/SPARTAN/trunk/src/mm/page.c
0,0 → 1,111
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <mm/page.h>
#include <mm/frame.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memstr.h>
 
 
void page_init(void)
{
	page_arch_init();
	map_page_to_frame(0x0, 0x0, PAGE_NOT_PRESENT, 0);
}
 
/** Map memory structure
*
* Identity-map memory structure
* considering possible crossings
* of page boundaries.
*
* @param s Address of the structure.
* @param size Size of the structure.
*/
void map_structure(__address s, size_t size)
{
	int i, cnt, length;

	length = size + (s - (s & ~(PAGE_SIZE - 1)));
	cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);

	for (i = 0; i < cnt; i++)
		map_page_to_frame(s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE, 0);
}
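
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096, mapping a structure
 * at s == 0x1ffc with size == 16 gives length == 16 + (0x1ffc - 0x1000) ==
 * 0x100c, hence cnt == 2, so both pages straddled by the structure get
 * identity-mapped.
 */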
 
/** Map page to frame
*
* Map virtual address 'page' to physical address 'frame'
* using 'flags'. Allocate and set up any missing page tables.
*
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is done.
* @param flags Flags to be used for mapping.
* @param root Explicit PTL0 address.
*/
void map_page_to_frame(__address page, __address frame, int flags, __address root)
{
	pte_t *ptl0, *ptl1, *ptl2, *ptl3;
	__address newpt;

	ptl0 = (pte_t *) PA2KA(root ? root : (__address) GET_PTL0_ADDRESS());

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
		newpt = frame_alloc(FRAME_KA);
		memsetb(newpt, PAGE_SIZE, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC);
	}

	ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
		newpt = frame_alloc(FRAME_KA);
		memsetb(newpt, PAGE_SIZE, 0);
		SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
		SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC);
	}

	ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
		newpt = frame_alloc(FRAME_KA);
		memsetb(newpt, PAGE_SIZE, 0);
		SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
		SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC);
	}

	ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags);
}
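
/*
 * Illustrative call (hypothetical addresses): identity-map the VGA text
 * buffer into the current PTL0, uncached:
 *
 *	map_page_to_frame(0xb8000, 0xb8000, PAGE_NOT_CACHEABLE, 0);
 */
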
/SPARTAN/trunk/src/mm/heap.c
0,0 → 1,154
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <mm/heap.h>
#include <synch/spinlock.h>
#include <func.h>
#include <memstr.h>
#include <panic.h>
#include <arch/types.h>
#include <arch/asm.h>
#include <arch.h>
 
/*
* First-fit algorithm.
* Simple, but hopefully correct.
* Chunks being freed are tested for mergeability with their neighbours.
*/
 
static chunk_t *chunk0;
static spinlock_t heaplock;
 
void heap_init(__address heap, size_t size)
{
	spinlock_initialize(&heaplock);
	memsetb(heap, size, 0);

	chunk0 = (chunk_t *) heap;
	chunk0->used = 0;
	chunk0->size = size - sizeof(chunk_t);
	chunk0->next = NULL;
	chunk0->prev = NULL;
}
 
/*
* Uses first-fit algorithm.
*/
void *malloc(size_t size)
{
	pri_t pri;
	chunk_t *x, *y, *z;

	if (size == 0)
		panic("zero-size allocation request");

	x = chunk0;

	pri = cpu_priority_high();
	spinlock_lock(&heaplock);

	while (x) {
		if (x->used || x->size < size) {
			x = x->next;
			continue;
		}

		x->used = 1;

		/*
		 * If the chunk exactly matches required size or if truncating
		 * it would not provide enough space for storing a new chunk
		 * header plus at least one byte of data, we are finished.
		 */
		if (x->size < size + sizeof(chunk_t) + 1) {
			spinlock_unlock(&heaplock);
			cpu_priority_restore(pri);
			return &x->data[0];
		}

		/*
		 * Truncate x and create a new chunk.
		 */
		y = (chunk_t *) (((__address) x) + size + sizeof(chunk_t));
		y->used = 0;
		y->size = x->size - size - sizeof(chunk_t);
		y->prev = x;
		y->next = NULL;

		if ((z = x->next)) {
			z->prev = y;
			y->next = z;
		}

		x->size = size;
		x->next = y;

		spinlock_unlock(&heaplock);
		cpu_priority_restore(pri);

		return &x->data[0];
	}

	spinlock_unlock(&heaplock);
	cpu_priority_restore(pri);

	return NULL;
}
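
/*
 * Worked example (illustrative): assume sizeof(chunk_t) == 16. A free chunk
 * with size == 100 serving malloc(64) is split: x->size becomes 64 and the
 * new chunk y gets 100 - 64 - 16 == 20 bytes. For malloc(84) the test
 * 100 < 84 + 16 + 1 holds, so no split is possible and the caller receives
 * the whole 100-byte chunk.
 */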
 
void free(void *ptr)
{
	pri_t pri;
	chunk_t *x, *y, *z;

	if (!ptr)
		panic("free on NULL");

	y = (chunk_t *) (((__u8 *) ptr) - sizeof(chunk_t));
	if (y->used != 1)
		panic("freeing unused/damaged chunk");

	pri = cpu_priority_high();
	spinlock_lock(&heaplock);

	x = y->prev;
	z = y->next;

	/* merge x and y */
	if (x && !x->used) {
		x->size += y->size + sizeof(chunk_t);
		x->next = z;
		if (z)
			z->prev = x;
		y = x;
	}

	/* merge y and z or merge (x merged with y) and z */
	if (z && !z->used) {
		y->size += z->size + sizeof(chunk_t);
		y->next = z->next;
		if (z->next) {
			/* y is either the original y or x */
			z->next->prev = y;
		}
	}

	y->used = 0;

	spinlock_unlock(&heaplock);
	cpu_priority_restore(pri);
}
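
/*
 * Worked example (illustrative): assume sizeof(chunk_t) == 16 and three
 * adjacent chunks x, y, z of sizes 32, 64 and 128 where x and z are free.
 * Freeing y first merges x with y (x->size == 32 + 64 + 16 == 112), then
 * merges the result with z (x->size == 112 + 128 + 16 == 256), leaving a
 * single free chunk.
 */
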
/SPARTAN/trunk/src/mm/tlb.c
0,0 → 1,81
/*
* Copyright (C) 2001-2004 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <mm/tlb.h>
#include <smp/ipi.h>
#include <synch/spinlock.h>
#include <typedefs.h>
#include <arch/atomic.h>
#include <arch/interrupt.h>
#include <config.h>
#include <arch.h>
 
#ifdef __SMP__
static spinlock_t tlblock;
 
void tlb_init(void)
{
	spinlock_initialize(&tlblock);
}
 
/* must be called with interrupts disabled */
void tlb_shootdown_start(void)
{
	int i;

	CPU->tlb_active = 0;
	spinlock_lock(&tlblock);
	tlb_shootdown_ipi_send();
	tlb_invalidate(0);	/* TODO: use valid ASID */

busy_wait:
	for (i = 0; i < config.cpu_count; i++)
		if (cpus[i].tlb_active)
			goto busy_wait;
}
 
void tlb_shootdown_finalize(void)
{
	spinlock_unlock(&tlblock);
	CPU->tlb_active = 1;
}
 
void tlb_shootdown_ipi_send(void)
{
	ipi_broadcast(VECTOR_TLB_SHOOTDOWN_IPI);
}
 
void tlb_shootdown_ipi_recv(void)
{
	CPU->tlb_active = 0;
	spinlock_lock(&tlblock);
	spinlock_unlock(&tlblock);
	tlb_invalidate(0);	/* TODO: use valid ASID */
	CPU->tlb_active = 1;
}
#endif /* __SMP__ */
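
/*
 * Protocol sketch (a summary of the code above, for orientation): the
 * initiator clears its own tlb_active flag, takes tlblock, broadcasts the
 * shootdown IPI, invalidates its TLB and spins until every CPU has cleared
 * tlb_active. Each receiver clears tlb_active, briefly acquires and releases
 * tlblock so it blocks until the initiator calls tlb_shootdown_finalize(),
 * invalidates its TLB and raises tlb_active again.
 */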