/SPARTAN/trunk/src/mm/tlb.c |
---|
0,0 → 1,34 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/tlb.h> |
/** Tear down TLB state on shutdown.
 *
 * Placeholder — intentionally performs no work yet.
 */
void tlb_shutdown(void)
{
	/* TODO: implement tlb_shutdown */
}
/SPARTAN/trunk/src/mm/vm.c |
---|
0,0 → 1,178 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/vm.h> |
#include <mm/page.h> |
#include <mm/frame.h> |
#include <arch/mm/page.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <synch/spinlock.h> |
#include <config.h> |
#include <list.h> |
#include <panic.h> |
vm_t *vm_create(void) |
{ |
vm_t *m; |
m = (vm_t *) malloc(sizeof(vm_t)); |
if (m) { |
spinlock_initialize(&m->lock); |
list_initialize(&m->vm_area_head); |
} |
return m; |
} |
/*
 * Destroy an address space.
 *
 * Placeholder: the vm areas on m->vm_area_head and the vm_t itself are
 * currently not released.
 */
void vm_destroy(vm_t *m)
{
}
/*
 * Create a new vm area of the given type within address space m.
 *
 * size is the number of pages; each page is backed by a freshly
 * allocated physical frame recorded in a->mapping.  addr is the
 * virtual base address and must be page aligned (panics otherwise).
 *
 * Returns the new area linked into m->vm_area_head, or NULL if either
 * the area descriptor or its mapping table cannot be allocated.
 */
vm_area_t *vm_area_create(vm_t *m, vm_type_t type, int size, __address addr)
{
	pri_t pri;
	vm_area_t *a;

	if (addr % PAGE_SIZE)
		panic(PANIC "addr not aligned to a page boundary");

	pri = cpu_priority_high();
	spinlock_lock(&m->lock);

	/*
	 * TODO: test vm_area which is to be created doesn't overlap with an existing one.
	 */

	a = (vm_area_t *) malloc(sizeof(vm_area_t));
	if (a) {
		int i;

		a->mapping = (__address *) malloc(size * sizeof(__address));
		if (!a->mapping) {
			/* Roll back: release the descriptor before unlocking. */
			free(a);
			spinlock_unlock(&m->lock);
			cpu_priority_restore(pri);
			return NULL;
		}

		/* Back every page of the area with a physical frame. */
		for (i=0; i<size; i++)
			a->mapping[i] = frame_alloc(0);

		spinlock_initialize(&a->lock);
		link_initialize(&a->link);
		a->type = type;
		a->size = size;
		a->address = addr;

		list_append(&a->link, &m->vm_area_head);
	}

	spinlock_unlock(&m->lock);
	cpu_priority_restore(pri);

	return a;
}
/*
 * Destroy a vm area.
 *
 * Placeholder: the area's frames, mapping table and descriptor are
 * currently not released.
 */
void vm_area_destroy(vm_area_t *a)
{
}
/*
 * Install page table entries for every page of the area.
 *
 * The area's type selects the page flags: VMA_TEXT is executable and
 * readable, VMA_DATA and VMA_STACK are readable and writable; all are
 * user-accessible, present and cacheable.  Each page is mapped onto the
 * frame recorded in a->mapping at creation time.
 */
void vm_area_map(vm_area_t *a)
{
	int i, flags;
	pri_t pri;

	pri = cpu_priority_high();
	spinlock_lock(&a->lock);

	switch (a->type) {
	case VMA_TEXT:
		flags = PAGE_EXEC | PAGE_READ | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
		break;
	case VMA_DATA:
	case VMA_STACK:
		flags = PAGE_READ | PAGE_WRITE | PAGE_USER | PAGE_PRESENT | PAGE_CACHEABLE;
		break;
	default:
		/* panic does not return, so flags is never used uninitialized */
		panic(PANIC "unexpected vm_type_t %d", a->type);
	}

	for (i=0; i<a->size; i++)
		map_page_to_frame(a->address + i*PAGE_SIZE, a->mapping[i], flags, 0);

	spinlock_unlock(&a->lock);
	cpu_priority_restore(pri);
}
/*
 * Remove the page table entries covering the area.
 *
 * Every page of the area is remapped to frame 0 with PAGE_NOT_PRESENT,
 * invalidating the translations installed by vm_area_map().  The backing
 * frames themselves are left allocated.
 */
void vm_area_unmap(vm_area_t *a)
{
	int i;
	pri_t pri;

	pri = cpu_priority_high();
	spinlock_lock(&a->lock);

	for (i=0; i<a->size; i++)
		map_page_to_frame(a->address + i*PAGE_SIZE, 0, PAGE_NOT_PRESENT, 0);

	spinlock_unlock(&a->lock);
	cpu_priority_restore(pri);
}
/*
 * Activate all areas of address space m by mapping each of them
 * via vm_area_map().
 */
void vm_install(vm_t *m)
{
	pri_t pri;
	link_t *cur;

	pri = cpu_priority_high();
	spinlock_lock(&m->lock);

	cur = m->vm_area_head.next;
	while (cur != &m->vm_area_head) {
		vm_area_map(list_get_instance(cur, vm_area_t, link));
		cur = cur->next;
	}

	spinlock_unlock(&m->lock);
	cpu_priority_restore(pri);
}
/*
 * Deactivate all areas of address space m by unmapping each of them
 * via vm_area_unmap().
 */
void vm_uninstall(vm_t *m)
{
	pri_t pri;
	link_t *cur;

	pri = cpu_priority_high();
	spinlock_lock(&m->lock);

	cur = m->vm_area_head.next;
	while (cur != &m->vm_area_head) {
		vm_area_unmap(list_get_instance(cur, vm_area_t, link));
		cur = cur->next;
	}

	spinlock_unlock(&m->lock);
	cpu_priority_restore(pri);
}
/SPARTAN/trunk/src/mm/frame.c |
---|
0,0 → 1,251 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <arch/types.h> |
#include <func.h> |
#include <mm/heap.h> |
#include <mm/frame.h> |
#include <mm/page.h> |
#include <mm/vm.h> |
#include <arch/mm/page.h> |
#include <config.h> |
#include <memstr.h> |
#include <panic.h> |
#include <synch/spinlock.h> |
/* Total number of physical frames and how many of them are free. */
__u32 frames;
__u32 frames_free;

/* Allocation bitmap: one bit per frame, 1 = in use. */
__u8 *frame_bitmap;
__u32 frame_bitmap_octets;

/*
 * This is for kernel address space frames (allocated with FRAME_KA).
 * Their addresses may not interfere with user address space.
 */
__u8 *frame_kernel_bitmap;
__u32 kernel_frames;
__u32 kernel_frames_free;

/* Serializes all accesses to the frame accounting state above. */
static spinlock_t framelock;
/*
 * Initialize physical frame accounting.
 *
 * Only the first active CPU (the bootstrap processor) allocates and
 * clears the frame bitmap and resets the counters; the kernel bitmap is
 * left for the architecture dependent frame_arch_init() to set up.
 * Finally, the frames occupied by the kernel image are reserved.
 */
void frame_init(void)
{
	if (config.cpu_active == 1) {
		/*
		 * The bootstrap processor will allocate all necessary memory for frame allocation.
		 */
		frames = config.memory_size / FRAME_SIZE;
		/* One bit per frame, rounded up to whole octets. */
		frame_bitmap_octets = frames / 8 + (frames % 8 > 0);
		frame_bitmap = (__u8 *) malloc(frame_bitmap_octets);
		if (!frame_bitmap)
			panic(PANIC "malloc/frame_bitmap\n");

		/*
		 * Mark all frames free.
		 */
		memsetb((__address) frame_bitmap, frame_bitmap_octets, 0);
		frames_free = frames;

		/*
		 * Will be properly set up by architecture dependent frame init.
		 */
		frame_kernel_bitmap = NULL;
		kernel_frames_free = 0;
		kernel_frames = 0;
	}

	/*
	 * No frame allocations/reservations prior this point.
	 */
	frame_arch_init();

	if (config.cpu_active == 1) {
		/*
		 * Create the memory address space map. Marked frames and frame
		 * regions cannot be used for allocation.
		 */
		frame_region_not_free(config.base, config.base + config.kernel_size);
	}
}
/* |
* Allocate a frame. |
*/ |
__address frame_alloc(int flags) |
{ |
int i; |
pri_t pri; |
__u8 **frame_bitmap_ptr = &frame_bitmap; |
__u32 *frames_ptr = &frames, *frames_free_ptr = &frames_free; |
if (flags & FRAME_KA) { |
frame_bitmap_ptr = &frame_kernel_bitmap; |
frames_ptr = &kernel_frames; |
frames_free_ptr = &kernel_frames_free; |
} |
loop: |
pri = cpu_priority_high(); |
spinlock_lock(&framelock); |
if (*frames_free_ptr) { |
for (i=0; i < *frames_ptr; i++) { |
int m, n; |
m = i / 8; |
n = i % 8; |
if (((*frame_bitmap_ptr)[m] & (1<<n)) == 0) { |
(*frame_bitmap_ptr)[m] |= (1<<n); |
*frames_free_ptr--; |
if (flags & FRAME_KA) { |
/* |
* frames_free_ptr points to kernel_frames_free |
* It is still necessary to decrement frames_free. |
*/ |
frames_free--; |
} |
spinlock_unlock(&framelock); |
cpu_priority_restore(pri); |
if (flags & FRAME_KA) return PA2KA(i*FRAME_SIZE); |
return i*FRAME_SIZE; |
} |
} |
panic(PANIC "frames_free inconsistent (%d)\n", frames_free); |
} |
spinlock_unlock(&framelock); |
cpu_priority_restore(pri); |
if (flags & FRAME_PANIC) |
panic(PANIC "unable to allocate frame\n"); |
/* TODO: implement sleeping logic here */ |
panic(PANIC "sleep not supported\n"); |
goto loop; |
} |
/* |
* Free a frame. |
*/ |
/*
 * Free a frame.
 *
 * Clears the frame's bit in the appropriate bitmap (the kernel bitmap
 * for kernel-space addresses, which are first translated with KA2PA())
 * and updates the free-frame counters.  Panics on a double free or on a
 * frame number beyond the bitmap.
 */
void frame_free(__address addr)
{
	pri_t pri;
	__u32 frame;
	__u32 *frames_free_ptr = &frames_free, *frames_ptr = &frames;
	__u8 **frame_bitmap_ptr = &frame_bitmap;

	if (IS_KA(addr)) {
		/* Redirect accounting to the kernel address space pools. */
		frames_free_ptr = &kernel_frames_free;
		frame_bitmap_ptr = &frame_kernel_bitmap;
	}

	pri = cpu_priority_high();
	spinlock_lock(&framelock);

	frame = IS_KA(addr) ? KA2PA(addr) : addr;
	frame /= FRAME_SIZE;
	if (frame < *frames_ptr) {
		int m, n;

		m = frame / 8;
		n = frame % 8;

		if ((*frame_bitmap_ptr)[m] & (1<<n)) {
			(*frame_bitmap_ptr)[m] &= ~(1<<n);
			/*
			 * Parentheses are required: postfix ++ binds tighter
			 * than unary *, so *frames_free_ptr++ would move the
			 * pointer and leave the counter untouched.
			 */
			(*frames_free_ptr)++;
			if (IS_KA(addr)) {
				/*
				 * frames_free_ptr points to kernel_frames_free
				 * It is still necessary to increment frames_free.
				 */
				frames_free++;
			}
		}
		else panic(PANIC "frame_free: frame already free\n");
	}
	else panic(PANIC "frame_free: frame number too big\n");

	spinlock_unlock(&framelock);
	cpu_priority_restore(pri);
}
/* |
* Don't use this function for normal allocation. Use frame_alloc() instead. |
* Use this function to declare that some special frame is not free. |
*/ |
/*
 * Don't use this function for normal allocation. Use frame_alloc() instead.
 * Use this function to declare that some special frame is not free.
 *
 * Marks the frame containing addr as used in the appropriate bitmap and
 * decrements the matching free-frame counter.  Frames already marked
 * used, or beyond the bitmap, are silently left alone.
 */
void frame_not_free(__address addr)
{
	pri_t pri;
	__u32 frame;
	__u32 *frames_ptr = &frames, *frames_free_ptr = &frames_free;
	__u8 **frame_bitmap_ptr = &frame_bitmap;

	/*
	 * NOTE(review): redirect to the kernel accounting structures for
	 * kernel-space addresses, mirroring frame_free(); the inner comment
	 * below already assumed this redirection, and without it frames_free
	 * would be decremented twice for FRAME_KA frames.
	 */
	if (IS_KA(addr)) {
		frames_free_ptr = &kernel_frames_free;
		frame_bitmap_ptr = &frame_kernel_bitmap;
	}

	pri = cpu_priority_high();
	spinlock_lock(&framelock);
	frame = IS_KA(addr) ? KA2PA(addr) : addr;
	frame /= FRAME_SIZE;
	if (frame < *frames_ptr) {
		int m, n;

		m = frame / 8;
		n = frame % 8;

		if (((*frame_bitmap_ptr)[m] & (1<<n)) == 0) {
			(*frame_bitmap_ptr)[m] |= (1<<n);
			/*
			 * Parentheses are required: postfix -- binds tighter
			 * than unary *, so *frames_free_ptr-- would move the
			 * pointer and leave the counter untouched.
			 */
			(*frames_free_ptr)--;
			if (IS_KA(addr)) {
				/*
				 * frames_free_ptr points to kernel_frames_free
				 * It is still necessary to decrement frames_free.
				 */
				frames_free--;
			}
		}
	}
	spinlock_unlock(&framelock);
	cpu_priority_restore(pri);
}
/*
 * Reserve every frame in the physical address range [start, stop],
 * including both boundary frames, via frame_not_free().
 */
void frame_region_not_free(__address start, __address stop)
{
	__u32 first, last, frame;

	first = start / FRAME_SIZE;
	last = stop / FRAME_SIZE;

	for (frame = first; frame <= last; frame++)
		frame_not_free(frame * FRAME_SIZE);
}
/SPARTAN/trunk/src/mm/page.c |
---|
0,0 → 1,36 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/page.h> |
#include <arch/mm/page.h> |
/*
 * Initialize the paging subsystem.
 *
 * Maps page 0 as not present (presumably so that null-pointer accesses
 * fault — confirm against arch code), then hands over to the
 * architecture dependent initialization.
 */
void page_init(void)
{
	map_page_to_frame(0x0, 0x0, PAGE_NOT_PRESENT, 0);
	page_arch_init();
}
/SPARTAN/trunk/src/mm/heap.c |
---|
0,0 → 1,151 |
/* |
* Copyright (C) 2001-2004 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <mm/heap.h> |
#include <synch/spinlock.h> |
#include <func.h> |
#include <memstr.h> |
#include <arch/types.h> |
/* |
* First-fit algorithm. |
* Simple, but hopefully correct. |
* Chunks being freed are tested for mergability with their neighbours. |
*/ |
/* Head of the chunk list; set up by heap_init() as one big free chunk. */
static chunk_t *chunk0;
/* Serializes all heap operations. */
static spinlock_t heaplock;
/*
 * Initialize the kernel heap.
 *
 * Zeroes the region and turns it into a single free chunk spanning
 * everything after the chunk header; size is in bytes.
 */
void heap_init(__address heap, int size)
{
	chunk_t *first;

	spinlock_initialize(&heaplock);
	memsetb(heap, size, 0);

	first = (chunk_t *) heap;
	first->used = 0;
	first->size = size - sizeof(chunk_t);
	first->next = NULL;
	first->prev = NULL;

	chunk0 = first;
}
/* |
* Uses first-fit algorithm. |
*/ |
void *malloc(int size) |
{ |
pri_t pri; |
chunk_t *x, *y, *z; |
if (size == 0) |
panic("malloc: zero-size allocation request"); |
x = chunk0; |
pri = cpu_priority_high(); |
spinlock_lock(&heaplock); |
while (x) { |
if (x->used || x->size < size) { |
x = x->next; |
continue; |
} |
x->used = 1; |
/* |
* If the chunk exactly matches required size or if truncating |
* it would not provide enough space for storing a new chunk |
* header plus at least one byte of data, we are finished. |
*/ |
if (x->size < size + sizeof(chunk_t) + 1) { |
spinlock_unlock(&heaplock); |
cpu_priority_restore(pri); |
return &x->data[0]; |
} |
/* |
* Truncate x and create a new chunk. |
*/ |
y = (chunk_t *) (((__address) x) + size + sizeof(chunk_t)); |
y->used = 0; |
y->size = x->size - size - sizeof(chunk_t); |
y->prev = x; |
y->next = NULL; |
if (z = x->next) { |
z->prev = y; |
y->next = z; |
} |
x->size = size; |
x->next = y; |
spinlock_unlock(&heaplock); |
cpu_priority_restore(pri); |
return &x->data[0]; |
} |
spinlock_unlock(&heaplock); |
cpu_priority_restore(pri); |
return NULL; |
} |
/*
 * Return a chunk previously obtained from malloc() to the heap.
 *
 * The chunk header is recovered by stepping back from the user pointer;
 * the chunk is then coalesced with its previous and/or next neighbour
 * when either is free, so adjacent free space merges into one chunk.
 * Panics on a NULL pointer or when the header does not look like a
 * chunk in use.
 */
void free(void *ptr)
{
	pri_t pri;
	chunk_t *x, *y, *z;

	if (!ptr)
		panic("free on NULL");

	/* Step back from the data area to the chunk header. */
	y = (chunk_t *) (((__u8 *) ptr) - sizeof(chunk_t));
	if (y->used != 1)
		panic("freeing unused/damaged chunk");

	pri = cpu_priority_high();
	spinlock_lock(&heaplock);

	x = y->prev;
	z = y->next;

	/* merge x and y */
	if (x && !x->used) {
		x->size += y->size + sizeof(chunk_t);
		x->next = z;
		if (z)
			z->prev = x;
		/* continue coalescing from the merged chunk */
		y = x;
	}

	/* merge y and z or merge (x merged with y) and z */
	if (z && !z->used) {
		y->size += z->size + sizeof(chunk_t);
		y->next = z->next;
		if (z->next) {
			/* y is either y or x */
			z->next->prev = y;
		}
	}

	y->used = 0;

	spinlock_unlock(&heaplock);
	cpu_priority_restore(pri);
}