Subversion Repositories HelenOS-historic

Compare Revisions

Rev 791 → Rev 792

/kernel/trunk/genarch/include/mm/page_ht.h
28,8 → 28,6
 
/*
* This is the generic page hash table interface.
* Architectures that use a single page hash table for
* storing page translations must implement it.
*/
 
#ifndef __PAGE_HT_H__
37,75 → 35,32
 
#include <mm/page.h>
#include <typedefs.h>
#include <arch/types.h>
#include <adt/list.h>
#include <adt/hash_table.h>
 
/** Page hash table size. */
#define HT_WIDTH HT_WIDTH_ARCH
#define HT_SIZE (1<<HT_WIDTH)
#define HT_ENTRIES (HT_SIZE/sizeof(pte_t))
#define PAGE_HT_KEYS 2
#define KEY_AS 0
#define KEY_PAGE 1
 
/** Hash function.
*
* @param page Virtual address. Only vpn bits will be used.
* @param asid Address space identifier.
*
* @return Pointer to hash table typed pte_t *.
*/
#define HT_HASH(page, asid) HT_HASH_ARCH(page, asid)
#define PAGE_HT_ENTRIES_BITS 13
#define PAGE_HT_ENTRIES (1<<PAGE_HT_ENTRIES_BITS)
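A minimal sketch (not part of the interface itself) of how the two lookup keys are laid out; as and page stand for a caller's address space pointer and virtual address, and the element order follows KEY_AS and KEY_PAGE above:

__native key[PAGE_HT_KEYS] = {
        (__address) as,         /* slot KEY_AS */
        page                    /* slot KEY_PAGE */
};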
 
/** Compare PTE with page and asid.
*
* @param page Virtual address. Only vpn bits will be used.
* @param asid Address space identifier.
* @param t PTE.
*
* @return 1 on match, 0 otherwise.
*/
#define HT_COMPARE(page, asid, t) HT_COMPARE_ARCH(page, asid, t)
struct pte {
        link_t link;            /**< Page hash table link. */
        as_t *as;               /**< Address space. */
        __address page;         /**< Virtual memory page. */
        __address frame;        /**< Physical memory frame. */
        int flags;
        unsigned a : 1;         /**< Accessed. */
        unsigned d : 1;         /**< Dirty. */
        unsigned p : 1;         /**< Present. */
};
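Because each PTE embeds its hash table link rather than being stored by pointer, items handed back by the generic hash table arrive as link_t pointers. A hypothetical helper (named here for illustration only; list_get_instance() comes from adt/list.h) shows how the enclosing pte_t is recovered, mirroring what the operations in page_ht.c below do:

static inline pte_t *pte_from_item(link_t *item)
{
        return list_get_instance(item, pte_t, link);
}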
 
/** Identify empty page hash table slots.
*
* @param t Pointer to hash table typed pte_t *.
*
* @return 1 if the slot is empty, 0 otherwise.
*/
#define HT_SLOT_EMPTY(t) HT_SLOT_EMPTY_ARCH(t)
 
/** Invalidate/empty page hash table slot.
*
* @param t Address of the slot to be invalidated.
*/
#define HT_INVALIDATE_SLOT(t) HT_INVALIDATE_SLOT_ARCH(t)
 
/** Return next record in collision chain.
*
* @param t PTE.
*
* @return Successor of PTE or NULL.
*/
#define HT_GET_NEXT(t) HT_GET_NEXT_ARCH(t)
 
/** Set successor in collision chain.
*
* @param t PTE.
* @param s Successor or NULL.
*/
#define HT_SET_NEXT(t, s) HT_SET_NEXT_ARCH(t, s)
 
/** Set page hash table record.
*
* @param t PTE.
* @param page Virtual address. Only vpn bits will be used.
* @param asid Address space identifier.
* @param frame Physical address. Only pfn bits will be used.
* @param flags Flags. See mm/page.h.
*/
#define HT_SET_RECORD(t, page, asid, frame, flags) HT_SET_RECORD_ARCH(t, page, asid, frame, flags)
 
extern page_operations_t page_ht_operations;
extern spinlock_t page_ht_lock;
 
extern pte_t *page_ht;
extern hash_table_t page_ht;
extern hash_table_operations_t ht_operations;
 
extern void ht_invalidate_all(void);
 
#endif
/kernel/trunk/genarch/src/mm/as_ht.c
33,6 → 33,7
#include <arch/types.h>
#include <typedefs.h>
#include <memstr.h>
#include <adt/hash_table.h>
 
static pte_t *ht_create(int flags);
 
48,13 → 49,12
*
* @param flags Ignored.
*
* @return Address of global page hash table.
* @return NULL.
*/
pte_t *ht_create(int flags)
{
if (!page_ht) {
page_ht = (pte_t *) frame_alloc(HT_WIDTH - FRAME_WIDTH, FRAME_KA | FRAME_PANIC);
memsetb((__address) page_ht, HT_SIZE, 0);
if (flags & FLAG_AS_KERNEL) {
hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations);
}
return page_ht;
return NULL;
}
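An illustrative view of the new behaviour, assuming ht_create() is reached through the address space backend rather than called directly: the first call for the kernel address space builds the single global table, and every call returns NULL because this backend has no per-address-space page table root.

pte_t *root = ht_create(FLAG_AS_KERNEL);        /* builds the global hash table once */
ASSERT(root == NULL);                           /* no per-AS table root with this backend */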
/kernel/trunk/genarch/src/mm/page_ht.c
28,6 → 28,7
 
#include <genarch/mm/page_ht.h>
#include <mm/page.h>
#include <arch/mm/page.h>
#include <mm/frame.h>
#include <mm/heap.h>
#include <mm/as.h>
39,7 → 40,15
#include <arch.h>
#include <debug.h>
#include <memstr.h>
#include <adt/hash_table.h>
 
static index_t hash(__native key[]);
static bool compare(__native key[], count_t keys, link_t *item);
static void remove_callback(link_t *item);
 
static void ht_mapping_insert(as_t *as, __address page, __address frame, int flags);
static pte_t *ht_mapping_find(as_t *as, __address page);
 
/**
* This lock protects the page hash table.
*/
46,13 → 55,17
SPINLOCK_INITIALIZE(page_ht_lock);
 
/**
* Page hash table pointer.
* Page hash table.
* The page hash table may be accessed only when page_ht_lock is held.
*/
pte_t *page_ht = NULL;
hash_table_t page_ht;
 
static void ht_mapping_insert(as_t *as, __address page, __address frame, int flags);
static pte_t *ht_mapping_find(as_t *as, __address page);
/** Hash table operations for page hash table. */
hash_table_operations_t ht_operations = {
        .hash = hash,
        .compare = compare,
        .remove_callback = remove_callback
};
 
page_operations_t page_ht_operations = {
.mapping_insert = ht_mapping_insert,
59,6 → 72,80
.mapping_find = ht_mapping_find
};
 
/** Compute page hash table index.
*
* @param key Array of two keys (i.e. page and address space).
*
* @return Index into page hash table.
*/
index_t hash(__native key[])
{
        as_t *as = (as_t *) key[KEY_AS];
        __address page = (__address) key[KEY_PAGE];
        index_t index;

        /*
         * Virtual page addresses have roughly the same probability
         * of occurring. Least significant bits of VPN compose the
         * hash index.
         */
        index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES-1));

        /*
         * Address space structures are likely to be allocated from
         * similar addresses. Least significant bits compose the
         * hash index.
         */
        index |= ((__native) as) & (PAGE_HT_ENTRIES-1);

        return index;
}
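A worked example of the index computation, assuming 4 KiB pages (PAGE_WIDTH of 12) together with the PAGE_HT_ENTRIES_BITS of 13 defined in page_ht.h; the addresses are made up:

/*
 * page  = 0x7fe45000:  (page >> 12) & 0x1fff      = 0x1e45
 * as    = 0x80042300:  ((__native) as) & 0x1fff   = 0x0300
 * index = 0x1e45 | 0x0300                         = 0x1f45
 */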
 
/** Compare page hash table item with page and/or address space.
*
* @param key Array of one or two keys (i.e. page and/or address space).
* @param keys Number of keys passed.
* @param item Item to compare the keys with.
*
* @return true on match, false otherwise.
*/
bool compare(__native key[], count_t keys, link_t *item)
{
        pte_t *t;

        ASSERT(item);
        ASSERT((keys > 0) && (keys <= PAGE_HT_KEYS));

        /*
         * Convert item to PTE.
         */
        t = list_get_instance(item, pte_t, link);

        if (keys == PAGE_HT_KEYS) {
                return (key[KEY_AS] == (__address) t->as) && (key[KEY_PAGE] == t->page);
        } else {
                return (key[KEY_AS] == (__address) t->as);
        }
}
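The single-key branch above matches every PTE of a given address space regardless of page. A hypothetical sketch of how that could be used when an address space is torn down, assuming the generic ADT also offers hash_table_remove() with a (table, key, key count) signature and that page_ht_lock is held by the caller:

__native key[1] = { (__address) as };

hash_table_remove(&page_ht, key, 1);    /* drops (and, via remove_callback(), frees) all PTEs of as */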
 
/** Callback on page hash table item removal.
*
* @param item Page hash table item being removed.
*/
void remove_callback(link_t *item)
{
        pte_t *t;

        ASSERT(item);

        /*
         * Convert item to PTE.
         */
        t = list_get_instance(item, pte_t, link);

        free(t);
}
 
/** Map page to frame using page hash table.
*
* Map virtual address 'page' to physical address 'frame'
73,43 → 160,20
*/
void ht_mapping_insert(as_t *as, __address page, __address frame, int flags)
{
pte_t *t, *u;
pte_t *t;
ipl_t ipl;
__native key[2] = { (__address) as, page };
ipl = interrupts_disable();
spinlock_lock(&page_ht_lock);
 
t = HT_HASH(page, as->asid);
if (!HT_SLOT_EMPTY(t)) {
if (!hash_table_find(&page_ht, key)) {
t = (pte_t *) malloc(sizeof(pte_t));
ASSERT(t != NULL);
/*
* The slot is occupied.
* Walk through the collision chain and append the mapping to its end.
*/
do {
u = t;
if (HT_COMPARE(page, as->asid, t)) {
/*
* Nothing to do,
* the record is already there.
*/
spinlock_unlock(&page_ht_lock);
interrupts_restore(ipl);
return;
}
} while ((t = HT_GET_NEXT(t)));
t = (pte_t *) malloc(sizeof(pte_t)); /* FIXME: use slab allocator for this */
if (!t)
panic("could not allocate memory\n");
 
HT_SET_NEXT(u, t);
hash_table_insert(&page_ht, key, &t->link);
}
HT_SET_RECORD(t, page, as->asid, frame, flags);
HT_SET_NEXT(t, NULL);
spinlock_unlock(&page_ht_lock);
interrupts_restore(ipl);
}
127,41 → 191,16
*/
pte_t *ht_mapping_find(as_t *as, __address page)
{
pte_t *t;
link_t *hlp;
pte_t *t = NULL;
__native key[2] = { (__address) as, page };
spinlock_lock(&page_ht_lock);
t = HT_HASH(page, as->asid);
if (!HT_SLOT_EMPTY(t)) {
while (!HT_COMPARE(page, as->asid, t) && HT_GET_NEXT(t))
t = HT_GET_NEXT(t);
t = HT_COMPARE(page, as->asid, t) ? t : NULL;
} else {
t = NULL;
}
 
hlp = hash_table_find(&page_ht, key);
if (hlp)
t = list_get_instance(hlp, pte_t, link);
 
spinlock_unlock(&page_ht_lock);
return t;
}
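A minimal end-to-end sketch of the two operations; as, va, pa and flags are hypothetical values, and in the kernel proper both functions are reached through the page_ht_operations table rather than called by name:

ht_mapping_insert(as, va, pa, flags);   /* inserts the mapping unless it already exists */
pte_t *t = ht_mapping_find(as, va);     /* the two-key lookup should now return that PTE */
ASSERT(t != NULL);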
 
/** Invalidate page hash table.
*
* Interrupts must be disabled.
*/
void ht_invalidate_all(void)
{
        pte_t *t, *u;
        int i;

        spinlock_lock(&page_ht_lock);
        for (i = 0; i < HT_ENTRIES; i++) {
                if (!HT_SLOT_EMPTY(&page_ht[i])) {
                        t = HT_GET_NEXT(&page_ht[i]);
                        while (t) {
                                u = t;
                                t = HT_GET_NEXT(t);
                                free(u); /* FIXME: use slab allocator for this */
                        }
                        HT_INVALIDATE_SLOT(&page_ht[i]);
                }
        }
        spinlock_unlock(&page_ht_lock);
}