Subversion Repositories HelenOS

Compare Revisions: Rev 1786 → Rev 1787

/trunk/kernel/genarch/src/mm/page_pt.c
0,0 → 1,270
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
 
/**
* @file
* @brief Virtual Address Translation for hierarchical 4-level page tables.
*/
 
#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <arch/types.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <memstr.h>
 
static void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags);
static void pt_mapping_remove(as_t *as, uintptr_t page);
static pte_t *pt_mapping_find(as_t *as, uintptr_t page);
 
page_mapping_operations_t pt_mapping_operations = {
	.mapping_insert = pt_mapping_insert,
	.mapping_remove = pt_mapping_remove,
	.mapping_find = pt_mapping_find
};
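
These operations are not meant to be called directly; the generic mapping interface dispatches through this structure. A minimal sketch of such a wrapper (the real one is assumed to live in mm/page.c and may differ in detail):

/* Sketch of the assumed generic dispatch layer; page_mapping_operations
 * is taken to point at pt_mapping_operations or ht_mapping_operations. */
extern page_mapping_operations_t *page_mapping_operations;

void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
	ASSERT(page_mapping_operations);
	ASSERT(page_mapping_operations->mapping_insert);
	page_mapping_operations->mapping_insert(as, page, frame, flags);
}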
 
/** Map page to frame using hierarchical page tables.
*
* Map virtual address page to physical address frame
* using flags.
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is done.
* @param flags Flags to be used for mapping.
*/
void pt_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
	pte_t *ptl0, *ptl1, *ptl2, *ptl3;
	pte_t *newpt;

	ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
		newpt = (pte_t *) frame_alloc(ONE_FRAME, FRAME_KA);
		memsetb((uintptr_t) newpt, PAGE_SIZE, 0);
		SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
		SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
	}

	ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
		newpt = (pte_t *) frame_alloc(ONE_FRAME, FRAME_KA);
		memsetb((uintptr_t) newpt, PAGE_SIZE, 0);
		SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
		SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
	}

	ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
		newpt = (pte_t *) frame_alloc(ONE_FRAME, FRAME_KA);
		memsetb((uintptr_t) newpt, PAGE_SIZE, 0);
		SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
		SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
	}

	ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	SET_FRAME_ADDRESS(ptl3, PTL3_INDEX(page), frame);
	SET_FRAME_FLAGS(ptl3, PTL3_INDEX(page), flags);
}
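
The PTL0_INDEX through PTL3_INDEX macros used above are architecture-defined. Purely as an illustration, a 4-level scheme that splits a virtual address into four 9-bit indices over 4 KiB pages (amd64-style) could define them as follows; the real definitions live in arch/mm/page.h:

/* Hypothetical index macros for a 9-9-9-9-12 address split;
 * illustration only, not the actual arch/mm/page.h definitions. */
#define PTL0_INDEX_SKETCH(page)  (((page) >> 39) & 0x1ff)
#define PTL1_INDEX_SKETCH(page)  (((page) >> 30) & 0x1ff)
#define PTL2_INDEX_SKETCH(page)  (((page) >> 21) & 0x1ff)
#define PTL3_INDEX_SKETCH(page)  (((page) >> 12) & 0x1ff)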
 
/** Remove mapping of page from hierarchical page tables.
*
* Remove any mapping of page within address space as.
* TLB shootdown should follow in order to make effects of
* this call visible.
*
* Empty page tables except PTL0 are freed.
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
*/
void pt_mapping_remove(as_t *as, uintptr_t page)
{
	pte_t *ptl0, *ptl1, *ptl2, *ptl3;
	bool empty = true;
	int i;

	/*
	 * First, remove the mapping, if it exists.
	 */

	ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return;

	ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	/* Destroy the mapping. Setting to PAGE_NOT_PRESENT is not sufficient. */
	memsetb((uintptr_t) &ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);

	/*
	 * Second, free all empty tables along the way from PTL3 down to PTL0.
	 */

	/* Check PTL3. */
	for (i = 0; i < PTL3_ENTRIES; i++) {
		if (PTE_VALID(&ptl3[i])) {
			empty = false;
			break;
		}
	}
	if (empty) {
		/*
		 * PTL3 is empty.
		 * Release the frame and remove the PTL3 pointer from the preceding table.
		 */
		frame_free(KA2PA((uintptr_t) ptl3));
		if (PTL2_ENTRIES)
			memsetb((uintptr_t) &ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
		else if (PTL1_ENTRIES)
			memsetb((uintptr_t) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
		else
			memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
	} else {
		/*
		 * PTL3 is not empty.
		 * Therefore, there must be a path from PTL0 to PTL3 and
		 * thus nothing to free in higher levels.
		 */
		return;
	}

	/* Check PTL2; empty is still true. */
	if (PTL2_ENTRIES) {
		for (i = 0; i < PTL2_ENTRIES; i++) {
			if (PTE_VALID(&ptl2[i])) {
				empty = false;
				break;
			}
		}
		if (empty) {
			/*
			 * PTL2 is empty.
			 * Release the frame and remove the PTL2 pointer from the preceding table.
			 */
			frame_free(KA2PA((uintptr_t) ptl2));
			if (PTL1_ENTRIES)
				memsetb((uintptr_t) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
			else
				memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		} else {
			/*
			 * PTL2 is not empty.
			 * Therefore, there must be a path from PTL0 to PTL2 and
			 * thus nothing to free in higher levels.
			 */
			return;
		}
	}

	/* Check PTL1; empty is still true. */
	if (PTL1_ENTRIES) {
		for (i = 0; i < PTL1_ENTRIES; i++) {
			if (PTE_VALID(&ptl1[i])) {
				empty = false;
				break;
			}
		}
		if (empty) {
			/*
			 * PTL1 is empty.
			 * Release the frame and remove the PTL1 pointer from PTL0.
			 */
			frame_free(KA2PA((uintptr_t) ptl1));
			memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
		}
	}
}
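
As the doc comment above notes, a TLB shootdown should follow removal to make its effects visible. A minimal sketch of that calling pattern, assuming generic page_mapping_remove()/page_table_lock() wrappers that dispatch through the operation structures in this file, and a TLB_INVL_PAGES shootdown variant analogous to the TLB_INVL_ASID calls used in asid.c below:

/* Sketch: demap one page and flush it from all TLBs (assumptions:
 * TLB_INVL_PAGES and tlb_invalidate_pages() exist as shown). */
page_table_lock(as, true);
page_mapping_remove(as, page);
tlb_shootdown_start(TLB_INVL_PAGES, as->asid, page, 1);
tlb_invalidate_pages(as->asid, page, 1);
tlb_shootdown_finalize();
page_table_unlock(as, true);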
 
/** Find mapping for virtual page in hierarchical page tables.
*
* Find mapping for virtual page.
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual page.
*
* @return NULL if there is no such mapping; entry from PTL3 describing the mapping otherwise.
*/
pte_t *pt_mapping_find(as_t *as, uintptr_t page)
{
	pte_t *ptl0, *ptl1, *ptl2, *ptl3;

	ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);

	if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	ptl1 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page)));

	if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	ptl2 = (pte_t *) PA2KA(GET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page)));

	if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT)
		return NULL;

	ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));

	return &ptl3[PTL3_INDEX(page)];
}
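
A typical lookup from the generic layer might look as follows, assuming a PTE_PRESENT() accessor and page_table_lock()/page_table_unlock() wrappers around the as_operations defined in as_pt.c:

/* Sketch: look up the mapping of a page with proper locking. */
pte_t *pte;

page_table_lock(as, true);
pte = page_mapping_find(as, page);
if ((pte != NULL) && PTE_PRESENT(pte)) {
	/* The PTE is valid; the frame address can be read from it. */
}
page_table_unlock(as, true);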
 
/** @}
*/
 
/trunk/kernel/genarch/src/mm/as_pt.c
0,0 → 1,144
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
 
/**
* @file
* @brief Address space functions for 4-level hierarchical pagetables.
*/
 
#include <genarch/mm/page_pt.h>
#include <mm/page.h>
#include <mm/frame.h>
#include <mm/as.h>
#include <synch/mutex.h>
#include <arch/mm/page.h>
#include <arch/mm/as.h>
#include <arch/types.h>
#include <typedefs.h>
#include <memstr.h>
#include <arch.h>
 
static pte_t *ptl0_create(int flags);
static void ptl0_destroy(pte_t *page_table);
 
static void pt_lock(as_t *as, bool lock);
static void pt_unlock(as_t *as, bool unlock);
 
as_operations_t as_pt_operations = {
	.page_table_create = ptl0_create,
	.page_table_destroy = ptl0_destroy,
	.page_table_lock = pt_lock,
	.page_table_unlock = pt_unlock
};
 
/** Create PTL0.
*
* A PTL0 of the 4-level page table hierarchy is created for each address space.
*
* @param flags Flags that specify whether the new PTL0 is for the kernel address space.
*
* @return New PTL0.
*/
pte_t *ptl0_create(int flags)
{
	pte_t *src_ptl0, *dst_ptl0;
	ipl_t ipl;

	dst_ptl0 = (pte_t *) frame_alloc(ONE_FRAME, FRAME_KA);

	if (flags & FLAG_AS_KERNEL) {
		memsetb((uintptr_t) dst_ptl0, PAGE_SIZE, 0);
	} else {
		uintptr_t src, dst;

		/*
		 * Copy the kernel address space portion to new PTL0.
		 */
		ipl = interrupts_disable();
		mutex_lock(&AS_KERNEL->lock);
		src_ptl0 = (pte_t *) PA2KA((uintptr_t) AS_KERNEL->page_table);

		src = (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
		dst = (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];

		memsetb((uintptr_t) dst_ptl0, PAGE_SIZE, 0);
		memcpy((void *) dst, (void *) src, PAGE_SIZE - (src - (uintptr_t) src_ptl0));
		mutex_unlock(&AS_KERNEL->lock);
		interrupts_restore(ipl);
	}

	return (pte_t *) KA2PA((uintptr_t) dst_ptl0);
}
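
To make the copied range concrete: assuming, for illustration only, a 4 KiB PTL0 holding 512 eight-byte entries and KERNEL_ADDRESS_SPACE_START at the midpoint of the virtual address space, the memcpy() length works out as follows:

/*
 * Illustration with assumed numbers (4 KiB PTL0, 512 entries of
 * 8 bytes, kernel half starting at entry 256):
 *
 *   src - (uintptr_t) src_ptl0 = 256 * 8 = 2048
 *   copied length = PAGE_SIZE - 2048 = 2048 bytes
 *
 * i.e. exactly the kernel half of PTL0 (entries 256..511) is
 * copied into the new table.
 */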
 
/** Destroy page table.
*
* Destroy PTL0; the other levels are expected to be already deallocated.
*
* @param page_table Physical address of PTL0.
*/
void ptl0_destroy(pte_t *page_table)
{
	frame_free((uintptr_t) page_table);
}
 
/** Lock page tables.
*
* Lock only the address space.
* Interrupts must be disabled.
*
* @param as Address space.
* @param lock If false, do not attempt to lock the address space.
*/
void pt_lock(as_t *as, bool lock)
{
	if (lock)
		mutex_lock(&as->lock);
}
 
/** Unlock page tables.
*
* Unlock the address space.
* Interrupts must be disabled.
*
* @param as Address space.
* @param unlock If false, do not attempt to unlock the address space.
*/
void pt_unlock(as_t *as, bool unlock)
{
	if (unlock)
		mutex_unlock(&as->lock);
}
 
/** @}
*/
 
/trunk/kernel/genarch/src/mm/page_ht.c
0,0 → 1,249
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
 
/**
* @file
* @brief Virtual Address Translation (VAT) for global page hash table.
*/
 
#include <genarch/mm/page_ht.h>
#include <mm/page.h>
#include <arch/mm/page.h>
#include <mm/frame.h>
#include <mm/slab.h>
#include <mm/as.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <arch/asm.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <debug.h>
#include <memstr.h>
#include <adt/hash_table.h>
#include <align.h>
 
static index_t hash(unative_t key[]);
static bool compare(unative_t key[], count_t keys, link_t *item);
static void remove_callback(link_t *item);
 
static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags);
static void ht_mapping_remove(as_t *as, uintptr_t page);
static pte_t *ht_mapping_find(as_t *as, uintptr_t page);
 
/**
* This lock protects the page hash table. It must be acquired
* after the address space lock and after any address space area
* locks.
*/
mutex_t page_ht_lock;
 
/**
* Page hash table.
* The page hash table may be accessed only when page_ht_lock is held.
*/
hash_table_t page_ht;
 
/** Hash table operations for page hash table. */
hash_table_operations_t ht_operations = {
	.hash = hash,
	.compare = compare,
	.remove_callback = remove_callback
};

/** Page mapping operations for page hash table architectures. */
page_mapping_operations_t ht_mapping_operations = {
	.mapping_insert = ht_mapping_insert,
	.mapping_remove = ht_mapping_remove,
	.mapping_find = ht_mapping_find
};
 
/** Compute page hash table index.
*
* @param key Array of two keys (i.e. page and address space).
*
* @return Index into page hash table.
*/
index_t hash(unative_t key[])
{
	as_t *as = (as_t *) key[KEY_AS];
	uintptr_t page = (uintptr_t) key[KEY_PAGE];
	index_t index;

	/*
	 * Virtual page addresses have roughly the same probability
	 * of occurring. Least significant bits of VPN compose the
	 * hash index.
	 */
	index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));

	/*
	 * Address space structures are likely to be allocated from
	 * similar addresses. Least significant bits compose the
	 * hash index.
	 */
	index |= ((unative_t) as) & (PAGE_HT_ENTRIES - 1);

	return index;
}
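
For example, with the assumed values PAGE_WIDTH == 12 and PAGE_HT_ENTRIES == 4096 (both constants are architecture-defined), the two components combine like this:

/*
 * Worked example (assumed constants: PAGE_WIDTH = 12,
 * PAGE_HT_ENTRIES = 4096, mask = 0xfff):
 *
 *   page  = 0x12345000: (page >> 12) & 0xfff     = 0x345
 *   as    = 0x8042a2c0: ((unative_t) as) & 0xfff = 0x2c0
 *   index = 0x345 | 0x2c0                        = 0x3c5
 */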
 
/** Compare page hash table item with page and/or address space.
*
* @param key Array of one or two keys (i.e. page and/or address space).
* @param keys Number of keys passed.
* @param item Item to compare the keys with.
*
* @return true on match, false otherwise.
*/
bool compare(unative_t key[], count_t keys, link_t *item)
{
	pte_t *t;

	ASSERT(item);
	ASSERT((keys > 0) && (keys <= PAGE_HT_KEYS));

	/*
	 * Convert item to PTE.
	 */
	t = hash_table_get_instance(item, pte_t, link);

	if (keys == PAGE_HT_KEYS) {
		return (key[KEY_AS] == (uintptr_t) t->as) && (key[KEY_PAGE] == t->page);
	} else {
		return (key[KEY_AS] == (uintptr_t) t->as);
	}
}
 
/** Callback on page hash table item removal.
*
* @param item Page hash table item being removed.
*/
void remove_callback(link_t *item)
{
	pte_t *t;

	ASSERT(item);

	/*
	 * Convert item to PTE.
	 */
	t = hash_table_get_instance(item, pte_t, link);

	free(t);
}
 
/** Map page to frame using page hash table.
*
* Map virtual address page to physical address frame
* using flags.
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is done.
* @param flags Flags to be used for mapping.
*/
void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
	pte_t *t;
	unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };

	if (!hash_table_find(&page_ht, key)) {
		t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
		ASSERT(t != NULL);

		t->g = (flags & PAGE_GLOBAL) != 0;
		t->x = (flags & PAGE_EXEC) != 0;
		t->w = (flags & PAGE_WRITE) != 0;
		t->k = !(flags & PAGE_USER);
		t->c = (flags & PAGE_CACHEABLE) != 0;
		t->p = !(flags & PAGE_NOT_PRESENT);

		t->as = as;
		t->page = ALIGN_DOWN(page, PAGE_SIZE);
		t->frame = ALIGN_DOWN(frame, FRAME_SIZE);

		hash_table_insert(&page_ht, key, &t->link);
	}
}
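
To make the flag translation concrete: inserting with, say, PAGE_WRITE | PAGE_CACHEABLE | PAGE_USER (and PAGE_NOT_PRESENT clear) fills the PTE bits as follows:

/*
 * Example: flags = PAGE_WRITE | PAGE_CACHEABLE | PAGE_USER
 *
 *   t->g = 0  (not global)      t->k = 0  (user-accessible)
 *   t->x = 0  (not executable)  t->c = 1  (cacheable)
 *   t->w = 1  (writable)        t->p = 1  (present)
 */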
 
/** Remove mapping of page from page hash table.
*
* Remove any mapping of page within address space as.
* TLB shootdown should follow in order to make effects of
* this call visible.
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
*/
void ht_mapping_remove(as_t *as, uintptr_t page)
{
	unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };

	/*
	 * Note that removed PTEs will be freed
	 * by remove_callback().
	 */
	hash_table_remove(&page_ht, key, 2);
}
 
 
/** Find mapping for virtual page in page hash table.
*
* Find mapping for virtual page.
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which page belongs.
* @param page Virtual page.
*
* @return NULL if there is no such mapping; requested mapping otherwise.
*/
pte_t *ht_mapping_find(as_t *as, uintptr_t page)
{
	link_t *hlp;
	pte_t *t = NULL;
	unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };

	hlp = hash_table_find(&page_ht, key);
	if (hlp)
		t = hash_table_get_instance(hlp, pte_t, link);

	return t;
}
 
/** @}
*/
 
/trunk/kernel/genarch/src/mm/as_ht.c
0,0 → 1,123
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
/**
* @file
* @brief Address space functions for global page hash table.
*/
 
#include <genarch/mm/as_ht.h>
#include <genarch/mm/page_ht.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <arch/types.h>
#include <typedefs.h>
#include <memstr.h>
#include <adt/hash_table.h>
#include <synch/mutex.h>
 
static pte_t *ht_create(int flags);
static void ht_destroy(pte_t *page_table);
 
static void ht_lock(as_t *as, bool lock);
static void ht_unlock(as_t *as, bool unlock);
 
as_operations_t as_ht_operations = {
	.page_table_create = ht_create,
	.page_table_destroy = ht_destroy,
	.page_table_lock = ht_lock,
	.page_table_unlock = ht_unlock,
};
 
 
/** Create the page hash table.
*
* The page hash table is created only once
* and is shared by all address spaces.
*
* @param flags Ignored.
*
* @return Always NULL.
*/
pte_t *ht_create(int flags)
{
	if (flags & FLAG_AS_KERNEL) {
		hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations);
		mutex_initialize(&page_ht_lock);
	}

	return NULL;
}
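
The generic address space layer is assumed to call through as_operations when an address space is created; with this backend only the kernel address space (FLAG_AS_KERNEL) triggers actual table creation, and every other call simply returns NULL. A sketch of such a call site:

/* Sketch of an assumed call site in as_create(); with the hash table
 * backend, as->page_table stays NULL for non-kernel address spaces
 * and the global page_ht is used instead. */
as->page_table = as_operations->page_table_create(flags);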
 
/** Destroy page table.
*
* Do nothing, as the global page hash table is used instead.
*
* @param page_table This parameter is ignored.
*/
void ht_destroy(pte_t *page_table)
{
	/* No-op. */
}
 
/** Lock page table.
*
* Lock address space and page hash table.
* Interrupts must be disabled.
*
* @param as Address space.
* @param lock If false, do not attempt to lock the address space.
*/
void ht_lock(as_t *as, bool lock)
{
	if (lock)
		mutex_lock(&as->lock);
	mutex_lock(&page_ht_lock);
}
 
/** Unlock page table.
*
* Unlock address space and page hash table.
* Interrupts must be disabled.
*
* @param as Address space.
* @param unlock If false, do not attempt to unlock the address space.
*/
void ht_unlock(as_t *as, bool unlock)
{
	mutex_unlock(&page_ht_lock);
	if (unlock)
		mutex_unlock(&as->lock);
}
 
/** @}
*/
 
/trunk/kernel/genarch/src/mm/asid.c
0,0 → 1,179
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
 
/**
* @file
* @brief ASID management.
*
* Modern processor architectures optimize TLB utilization
* by using ASIDs (a.k.a. memory contexts on sparc64 and
* region identifiers on ia64). These ASIDs help to associate
* each TLB item with an address space, thus making
* finer-grained TLB invalidation possible.
*
* Unfortunately, there are usually fewer ASIDs available than
* there can be unique as_t structures (i.e. address spaces
* recognized by the kernel).
*
* When the system runs short of ASIDs, it will attempt to steal
* an ASID from an address space that has not been active for
* a while.
*
* This code depends on the fact that ASIDS_ALLOCABLE
* is greater than the number of supported CPUs (i.e. the
* maximum number of concurrently active address spaces).
* This guarantees that when all ASIDs are allocated, at
* least one of them belongs to an inactive address space
* from which it can be stolen.
*
* Architectures that lack hardware support for address
* spaces are not linked with this file.
*/
 
#include <mm/asid.h>
#include <mm/as.h>
#include <mm/tlb.h>
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <arch.h>
#include <adt/list.h>
#include <debug.h>
 
/**
* asidlock protects the asids_allocated counter and the FIFO
* of free ASIDs (see asid_fifo.c).
*/
SPINLOCK_INITIALIZE(asidlock);
 
static count_t asids_allocated = 0;
 
/** Allocate free address space identifier.
*
* Interrupts must be disabled and inactive_as_with_asid_lock must be held
* prior to this call.
*
* @return New ASID.
*/
asid_t asid_get(void)
{
	asid_t asid;
	link_t *tmp;
	as_t *as;

	/*
	 * Check if there is an unallocated ASID.
	 */
	spinlock_lock(&asidlock);
	if (asids_allocated == ASIDS_ALLOCABLE) {
		/*
		 * All ASIDs are already allocated.
		 * Resort to stealing.
		 */

		/*
		 * Remove the first item on the list.
		 * It is guaranteed to belong to an
		 * inactive address space.
		 */
		ASSERT(!list_empty(&inactive_as_with_asid_head));
		tmp = inactive_as_with_asid_head.next;
		list_remove(tmp);
		as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);
		mutex_lock_active(&as->lock);

		/*
		 * Steal the ASID.
		 * Note that the stolen ASID is not active.
		 */
		asid = as->asid;
		ASSERT(asid != ASID_INVALID);

		/*
		 * Notify the address space from which the ASID
		 * was stolen by invalidating its asid member.
		 */
		as->asid = ASID_INVALID;
		mutex_unlock(&as->lock);

		/*
		 * Get the system rid of the stolen ASID.
		 */
		tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
		tlb_invalidate_asid(asid);
		tlb_shootdown_finalize();
	} else {
		/*
		 * There is at least one unallocated ASID.
		 * Find it and assign it.
		 */
		asid = asid_find_free();
		asids_allocated++;

		/*
		 * Purge the newly allocated ASID from TLBs.
		 */
		tlb_shootdown_start(TLB_INVL_ASID, asid, 0, 0);
		tlb_invalidate_asid(asid);
		tlb_shootdown_finalize();
	}
	spinlock_unlock(&asidlock);

	return asid;
}
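
A sketch of the expected calling context, e.g. when an address space without a valid ASID is about to be installed (interrupts disabled and the required lock held, as stated above):

/* Sketch: lazily (re)acquire an ASID before installing an address
 * space; asid_get() may steal one from an inactive address space. */
if (as->asid == ASID_INVALID)
	as->asid = asid_get();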
 
/** Release address space identifier.
*
* This code relies on architecture
* dependent functionality.
*
* @param asid ASID to be released.
*/
void asid_put(asid_t asid)
{
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&asidlock);

	asids_allocated--;
	asid_put_arch(asid);

	spinlock_unlock(&asidlock);
	interrupts_restore(ipl);
}
 
/** @}
*/
 
/trunk/kernel/genarch/src/mm/asid_fifo.c
0,0 → 1,99
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
/**
* @file
* @brief FIFO queue ASID management.
*
* Architectures that link with this file keep the unallocated ASIDs
* in a FIFO queue. The queue can be allocated statically (e.g. mips32)
* or dynamically (e.g. ia64 and sparc64).
*/
 
#include <genarch/mm/asid_fifo.h>
#include <arch/mm/asid.h>
#include <mm/asid.h>
#include <typedefs.h>
#include <adt/fifo.h>
 
#define FIFO_STATIC_LIMIT 1024
#define FIFO_STATIC (ASIDS_ALLOCABLE < FIFO_STATIC_LIMIT)
 
/**
* FIFO queue containing unassigned ASIDs.
* May be accessed only when asidlock is held.
*/
#if FIFO_STATIC
FIFO_INITIALIZE_STATIC(free_asids, asid_t, ASIDS_ALLOCABLE);
#else
FIFO_INITIALIZE_DYNAMIC(free_asids, asid_t, ASIDS_ALLOCABLE);
#endif
 
/** Initialize data structures for O(1) ASID allocation and deallocation. */
void asid_fifo_init(void)
{
	int i;

#if (!FIFO_STATIC)
	fifo_create(free_asids);
#endif

	for (i = 0; i < ASIDS_ALLOCABLE; i++) {
		fifo_push(free_asids, ASID_START + i);
	}
}
 
/** Allocate free ASID.
*
* Allocation runs in O(1).
*
* @return Free ASID.
*/
asid_t asid_find_free(void)
{
	return fifo_pop(free_asids);
}
 
/** Return an ASID to the set of free ASIDs.
*
* This operation runs in O(1).
*
* @param asid ASID being freed.
*/
void asid_put_arch(asid_t asid)
{
	fifo_push(free_asids, asid);
}
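
Because the queue is FIFO, a freed ASID goes to the tail and is recycled last, which maximizes the time before it is reused (and thus before any stale TLB entries tagged with it could matter). For instance:

/*
 * Example of the FIFO recycling order after asid_fifo_init():
 *
 *   asid_find_free()        -> ASID_START
 *   asid_find_free()        -> ASID_START + 1
 *   asid_put_arch(ASID_START);   ASID_START goes to the queue tail
 *   asid_find_free()        -> ASID_START + 2   (not ASID_START)
 */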
 
/** @}
*/