/kernel/trunk/genarch/include/mm/page_ht.h |
---|
28,8 → 28,6 |
/* |
* This is the generic page hash table interface. |
* Architectures that use single page hash table for |
* storing page translations must implement it. |
*/ |
#ifndef __PAGE_HT_H__ |
37,75 → 35,32 |
#include <mm/page.h> |
#include <typedefs.h> |
#include <arch/types.h> |
#include <adt/list.h> |
#include <adt/hash_table.h> |
/** Page hash table size. */ |
#define HT_WIDTH HT_WIDTH_ARCH |
#define HT_SIZE (1<<HT_WIDTH) |
#define HT_ENTRIES (HT_SIZE/sizeof(pte_t)) |
#define PAGE_HT_KEYS 2 |
#define KEY_AS 0 |
#define KEY_PAGE 1 |
/** Hash function. |
* |
* @param page Virtual address. Only vpn bits will be used. |
* @param asid Address space identifier. |
* |
* @return Pointer to hash table typed pte_t *. |
*/ |
#define HT_HASH(page, asid) HT_HASH_ARCH(page, asid) |
#define PAGE_HT_ENTRIES_BITS 13 |
#define PAGE_HT_ENTRIES (1<<PAGE_HT_ENTRIES_BITS) |
/** Compare PTE with page and asid. |
* |
* @param page Virtual address. Only vpn bits will be used. |
* @param asid Address space identifier. |
* @param t PTE. |
* |
* @return 1 on match, 0 otherwise. |
*/ |
#define HT_COMPARE(page, asid, t) HT_COMPARE_ARCH(page, asid, t) |
/** Generic page hash table entry.
 *
 * Records one virtual-page-to-physical-frame translation of an
 * address space. Entries are linked into the global page hash
 * table through the embedded link member.
 */
struct pte {
	link_t link;		/**< Page hash table link. */
	as_t *as;		/**< Address space. */
	__address page;		/**< Virtual memory page. */
	__address frame;	/**< Physical memory frame. */
	int flags;		/**< Mapping flags. See mm/page.h. */
	unsigned a : 1;		/**< Accessed. */
	unsigned d : 1;		/**< Dirty. */
	unsigned p : 1;		/**< Present. */
};
/** Identify empty page hash table slots. |
* |
* @param t Pointer to hash table typed pte_t *. |
* |
* @return 1 if the slot is empty, 0 otherwise. |
*/ |
#define HT_SLOT_EMPTY(t) HT_SLOT_EMPTY_ARCH(t) |
/** Invalidate/empty page hash table slot. |
* |
* @param t Address of the slot to be invalidated. |
*/ |
#define HT_INVALIDATE_SLOT(t) HT_INVALIDATE_SLOT_ARCH(t) |
/** Return next record in collision chain. |
* |
* @param t PTE. |
* |
* @return Successor of PTE or NULL. |
*/ |
#define HT_GET_NEXT(t) HT_GET_NEXT_ARCH(t) |
/** Set successor in collision chain. |
* |
* @param t PTE. |
* @param s Successor or NULL. |
*/ |
#define HT_SET_NEXT(t, s) HT_SET_NEXT_ARCH(t, s) |
/** Set page hash table record. |
* |
* @param t PTE. |
* @param page Virtual address. Only vpn bits will be used. |
* @param asid Address space identifier. |
* @param frame Physical address. Only pfn bits will be used. |
* @param flags Flags. See mm/page.h. |
*/ |
#define HT_SET_RECORD(t, page, asid, frame, flags) HT_SET_RECORD_ARCH(t, page, asid, frame, flags) |
extern page_operations_t page_ht_operations; |
extern spinlock_t page_ht_lock; |
extern pte_t *page_ht; |
extern hash_table_t page_ht; |
extern hash_table_operations_t ht_operations; |
extern void ht_invalidate_all(void); |
#endif |
/kernel/trunk/genarch/src/mm/as_ht.c |
---|
33,6 → 33,7 |
#include <arch/types.h> |
#include <typedefs.h> |
#include <memstr.h> |
#include <adt/hash_table.h> |
static pte_t *ht_create(int flags); |
48,13 → 49,12 |
* |
* @param flags Ignored. |
* |
* @return Address of global page hash table. |
* @return Returns NULL. |
*/ |
pte_t *ht_create(int flags) |
{ |
if (!page_ht) { |
page_ht = (pte_t *) frame_alloc(HT_WIDTH - FRAME_WIDTH, FRAME_KA | FRAME_PANIC); |
memsetb((__address) page_ht, HT_SIZE, 0); |
if (flags & FLAG_AS_KERNEL) { |
hash_table_create(&page_ht, PAGE_HT_ENTRIES, 2, &ht_operations); |
} |
return page_ht; |
return NULL; |
} |
/kernel/trunk/genarch/src/mm/page_ht.c |
---|
28,6 → 28,7 |
#include <genarch/mm/page_ht.h> |
#include <mm/page.h> |
#include <arch/mm/page.h> |
#include <mm/frame.h> |
#include <mm/heap.h> |
#include <mm/as.h> |
39,7 → 40,15 |
#include <arch.h> |
#include <debug.h> |
#include <memstr.h> |
#include <adt/hash_table.h> |
static index_t hash(__native key[]); |
static bool compare(__native key[], count_t keys, link_t *item); |
static void remove_callback(link_t *item); |
static void ht_mapping_insert(as_t *as, __address page, __address frame, int flags); |
static pte_t *ht_mapping_find(as_t *as, __address page); |
/** |
* This lock protects the page hash table. |
*/ |
46,13 → 55,17 |
SPINLOCK_INITIALIZE(page_ht_lock); |
/** |
* Page hash table pointer. |
* Page hash table. |
* The page hash table may be accessed only when page_ht_lock is held. |
*/ |
pte_t *page_ht = NULL; |
hash_table_t page_ht; |
static void ht_mapping_insert(as_t *as, __address page, __address frame, int flags); |
static pte_t *ht_mapping_find(as_t *as, __address page); |
/** Hash table operations for page hash table. */ |
hash_table_operations_t ht_operations = { |
.hash = hash, |
.compare = compare, |
.remove_callback = remove_callback |
}; |
page_operations_t page_ht_operations = { |
.mapping_insert = ht_mapping_insert, |
59,6 → 72,80 |
.mapping_find = ht_mapping_find |
}; |
/** Compute page hash table index. |
* |
* @param key Array of two keys (i.e. page and address space). |
* |
* @return Index into page hash table. |
*/ |
/** Compute page hash table index.
 *
 * @param key Array of two keys (i.e. page and address space).
 *
 * @return Index into page hash table.
 */
index_t hash(__native key[])
{
	as_t *as = (as_t *) key[KEY_AS];
	__address page = (__address) key[KEY_PAGE];
	index_t index;
	
	/*
	 * Virtual page addresses have roughly the same probability
	 * of occurring. Least significant bits of VPN compose the
	 * hash index.
	 */
	index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));
	
	/*
	 * Address space structures are likely to be allocated from
	 * similar addresses. Least significant bits compose the
	 * hash index.
	 *
	 * Mix them in with XOR rather than OR: OR-ing two bit masks
	 * biases the result towards indices with many set bits and
	 * degrades the distribution over the table.
	 */
	index ^= ((__native) as) & (PAGE_HT_ENTRIES - 1);
	
	return index;
}
/** Compare page hash table item with page and/or address space. |
* |
* @param key Array of one or two keys (i.e. page and/or address space). |
* @param keys Number of keys passed. |
* @param item Item to compare the keys with. |
* |
* @return true on match, false otherwise. |
*/ |
/** Compare page hash table item with page and/or address space.
 *
 * @param key Array of one or two keys (i.e. page and/or address space).
 * @param keys Number of keys passed.
 * @param item Item to compare the keys with.
 *
 * @return true on match, false otherwise.
 */
bool compare(__native key[], count_t keys, link_t *item)
{
	pte_t *pte;
	bool as_match;

	ASSERT(item);
	ASSERT((keys > 0) && (keys <= PAGE_HT_KEYS));

	/*
	 * Convert item to PTE.
	 */
	pte = list_get_instance(item, pte_t, link);

	as_match = (key[KEY_AS] == (__address) pte->as);
	if (keys != PAGE_HT_KEYS)
		return as_match;	/* address space key only */
	return as_match && (key[KEY_PAGE] == pte->page);
}
/** Callback on page hash table item removal. |
* |
* @param item Page hash table item being removed. |
*/ |
/** Callback on page hash table item removal.
 *
 * Reclaims the PTE that embeds the removed hash table link.
 *
 * @param item Page hash table item being removed.
 */
void remove_callback(link_t *item)
{
	ASSERT(item);

	/* Convert item to PTE and release its memory. */
	free(list_get_instance(item, pte_t, link));
}
/** Map page to frame using page hash table. |
* |
* Map virtual address 'page' to physical address 'frame' |
73,43 → 160,20 |
*/ |
void ht_mapping_insert(as_t *as, __address page, __address frame, int flags) |
{ |
pte_t *t, *u; |
pte_t *t; |
ipl_t ipl; |
__native key[2] = { (__address) as, page }; |
ipl = interrupts_disable(); |
spinlock_lock(&page_ht_lock); |
t = HT_HASH(page, as->asid); |
if (!HT_SLOT_EMPTY(t)) { |
if (!hash_table_find(&page_ht, key)) { |
t = (pte_t *) malloc(sizeof(pte_t)); |
ASSERT(t != NULL); |
/* |
* The slot is occupied. |
* Walk through the collision chain and append the mapping to its end. |
*/ |
do { |
u = t; |
if (HT_COMPARE(page, as->asid, t)) { |
/* |
* Nothing to do, |
* the record is already there. |
*/ |
spinlock_unlock(&page_ht_lock); |
interrupts_restore(ipl); |
return; |
hash_table_insert(&page_ht, key, &t->link); |
} |
} while ((t = HT_GET_NEXT(t))); |
t = (pte_t *) malloc(sizeof(pte_t)); /* FIXME: use slab allocator for this */ |
if (!t) |
panic("could not allocate memory\n"); |
HT_SET_NEXT(u, t); |
} |
HT_SET_RECORD(t, page, as->asid, frame, flags); |
HT_SET_NEXT(t, NULL); |
spinlock_unlock(&page_ht_lock); |
interrupts_restore(ipl); |
} |
127,41 → 191,16 |
*/ |
pte_t *ht_mapping_find(as_t *as, __address page) |
{ |
pte_t *t; |
link_t *hlp; |
pte_t *t = NULL; |
__native key[2] = { (__address) as, page }; |
spinlock_lock(&page_ht_lock); |
t = HT_HASH(page, as->asid); |
if (!HT_SLOT_EMPTY(t)) { |
while (!HT_COMPARE(page, as->asid, t) && HT_GET_NEXT(t)) |
t = HT_GET_NEXT(t); |
t = HT_COMPARE(page, as->asid, t) ? t : NULL; |
} else { |
t = NULL; |
} |
spinlock_unlock(&page_ht_lock); |
return t; |
} |
/** Invalidate page hash table. |
* |
* Interrupts must be disabled. |
*/ |
void ht_invalidate_all(void) |
{ |
pte_t *t, *u; |
int i; |
hlp = hash_table_find(&page_ht, key); |
if (hlp) |
t = list_get_instance(hlp, pte_t, link); |
spinlock_lock(&page_ht_lock); |
for (i = 0; i < HT_ENTRIES; i++) { |
if (!HT_SLOT_EMPTY(&page_ht[i])) { |
t = HT_GET_NEXT(&page_ht[i]); |
while (t) { |
u = t; |
t = HT_GET_NEXT(t); |
free(u); /* FIXME: use slab allocator for this */ |
} |
HT_INVALIDATE_SLOT(&page_ht[i]); |
} |
} |
spinlock_unlock(&page_ht_lock); |
return t; |
} |
/kernel/trunk/generic/include/adt/hash_table.h |
---|
29,9 → 29,9 |
#ifndef __HASH_TABLE_H__ |
#define __HASH_TABLE_H__ |
#include <adt/list.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#include <adt/list.h> |
/** Hash table structure. */ |
struct hash_table { |
53,7 → 53,7 |
/** Hash table item comparison function. |
* |
* @param key Array of keys that will be compared against item. It is not necessary to pass all keys. |
* @param key Array of keys that will be compared with item. It is not necessary to pass all keys. |
* |
* @return true if the keys match, false otherwise. |
*/ |
69,7 → 69,7 |
#define hash_table_get_instance(item, type, member) list_get_instance((item), (type), (member)) |
extern void hash_table_create(hash_table_t *h, count_t m, count_t max_keys, hash_table_operations_t *op); |
extern bool hash_table_insert(hash_table_t *h, __native key[], link_t *item); |
extern void hash_table_insert(hash_table_t *h, __native key[], link_t *item); |
extern link_t *hash_table_find(hash_table_t *h, __native key[]); |
extern void hash_table_remove(hash_table_t *h, __native key[], count_t keys); |
/kernel/trunk/generic/src/adt/hash_table.c |
---|
67,10 → 67,8 |
* @param h Hash table. |
* @param key Array of all keys necessary to compute hash index. |
* @param item Item to be inserted into the hash table. |
* |
* @return true on success, false if the keys were already present in the hash table. |
*/ |
bool hash_table_insert(hash_table_t *h, __native key[], link_t *item) |
void hash_table_insert(hash_table_t *h, __native key[], link_t *item) |
{ |
index_t chain; |
80,16 → 78,7 |
chain = h->op->hash(key); |
ASSERT(chain < h->entries); |
if (hash_table_find(h, key)) { |
/* |
* The hash table is not redundant. |
* Signal failure on return. |
*/ |
return false; |
} |
list_append(item, &h->entry[chain]); |
return true; |
} |
/** Search hash table for an item matching keys. |
/kernel/trunk/generic/src/mm/as.c |
---|
281,7 → 281,6 |
ipl = interrupts_disable(); |
spinlock_lock(&as->lock); |
ASSERT(as->page_table); |
SET_PTL0_ADDRESS(as->page_table); |
spinlock_unlock(&as->lock); |
interrupts_restore(ipl); |
/kernel/trunk/arch/sparc64/include/types.h |
---|
44,7 → 44,7 |
typedef __u64 __native; |
typedef __u64 pte_t; |
typedef struct pte pte_t; |
typedef __u8 asi_t; |
/kernel/trunk/arch/sparc64/include/mm/page.h |
---|
41,16 → 41,6 |
#define SET_PTL0_ADDRESS_ARCH(x) /**< To be removed as situation permits. */ |
/** Implementation of page hash table interface. */ |
#define HT_WIDTH_ARCH 20 /* 1M */ |
#define HT_HASH_ARCH(page, asid) 0 |
#define HT_COMPARE_ARCH(page, asid, t) 0 |
#define HT_SLOT_EMPTY_ARCH(t) 1 |
#define HT_INVALIDATE_SLOT_ARCH(t) |
#define HT_GET_NEXT_ARCH(t) 0 |
#define HT_SET_NEXT_ARCH(t, s) |
#define HT_SET_RECORD_ARCH(t, page, asid, frame, flags) |
union page_address { |
__address address; |
struct { |
/kernel/trunk/arch/sparc64/src/mm/frame.c |
---|
32,5 → 32,11 |
/** Architecture-specific frame allocator initialization (sparc64).
 *
 * Reserves frame 0 and creates one zone spanning all physical
 * memory, rounded down to a whole number of frames.
 */
void frame_arch_init(void)
{
	/*
	 * Workaround to prevent slab allocator from allocating frame 0,
	 * which is not, at that time, mapped.
	 */
	frame_region_not_free(0, FRAME_SIZE);

	zone_create_in_region(0, config.memory_size & ~(FRAME_SIZE - 1));
}
/kernel/trunk/arch/ia64/include/types.h |
---|
47,6 → 47,6 |
typedef __u64 __native; |
typedef union vhpt_entry pte_t; |
typedef struct pte pte_t; |
#endif |
/kernel/trunk/arch/ia64/include/mm/page.h |
---|
45,16 → 45,6 |
#define SET_PTL0_ADDRESS_ARCH(x) /**< To be removed as situation permits. */ |
/** Implementation of page hash table interface. */ |
#define HT_WIDTH_ARCH 20 /* 1M */ |
#define HT_HASH_ARCH(page, asid) vhpt_hash((page), (asid)) |
#define HT_COMPARE_ARCH(page, asid, t) vhpt_compare((page), (asid), (t)) |
#define HT_SLOT_EMPTY_ARCH(t) ((t)->present.tag.tag_info.ti) |
#define HT_INVALIDATE_SLOT_ARCH(t) (t)->present.tag.tag_info.ti = true |
#define HT_GET_NEXT_ARCH(t) (t)->present.next |
#define HT_SET_NEXT_ARCH(t, s) (t)->present.next = (s) |
#define HT_SET_RECORD_ARCH(t, page, asid, frame, flags) vhpt_set_record(t, page, asid, frame, flags) |
#define PPN_SHIFT 12 |
#define VRN_SHIFT 61 |
64,7 → 54,7 |
#define VHPT_WIDTH 20 /* 1M */ |
#define VHPT_SIZE (1<<VHPT_WIDTH) |
#define VHPT_BASE page_ht /* Must be aligned to VHPT_SIZE */ |
#define VHPT_BASE 0 /* Must be aligned to VHPT_SIZE */ |
#define PTA_BASE_SHIFT 15 |
115,7 → 105,7 |
union vhpt_tag tag; |
/* Word 3 */ |
pte_t *next; /**< Collision chain next pointer. */ |
__u64 ig3 : 64; |
} __attribute__ ((packed)); |
struct vhpt_entry_not_present { |
133,8 → 123,7 |
union vhpt_tag tag; |
/* Word 3 */ |
pte_t *next; /**< Collision chain next pointer. */ |
__u64 ig3 : 64; |
} __attribute__ ((packed)); |
typedef union vhpt_entry { |
141,7 → 130,7 |
struct vhpt_entry_present present; |
struct vhpt_entry_not_present not_present; |
__u64 word[4]; |
} vhpt_entry; |
} vhpt_entry_t; |
struct region_register_map { |
unsigned ve : 1; |
257,8 → 246,9 |
} |
extern void page_arch_init(void); |
extern pte_t *vhpt_hash(__address page, asid_t asid); |
extern bool vhpt_compare(__address page, asid_t asid, pte_t *t); |
extern void vhpt_set_record(pte_t *t, __address page, asid_t asid, __address frame, int flags); |
extern vhpt_entry_t *vhpt_hash(__address page, asid_t asid); |
extern bool vhpt_compare(__address page, asid_t asid, vhpt_entry_t *v); |
extern void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags); |
#endif |
/kernel/trunk/arch/ia64/src/mm/page.c |
---|
42,7 → 42,7 |
#include <arch/barrier.h> |
#include <memstr.h> |
static void set_vhpt_environment(void); |
static void set_environment(void); |
/** Initialize ia64 virtual address translation subsystem. */ |
void page_arch_init(void) |
49,11 → 49,11 |
{ |
page_operations = &page_ht_operations; |
pk_disable(); |
set_vhpt_environment(); |
set_environment(); |
} |
/** Initialize VHPT and region registers. */ |
void set_vhpt_environment(void) |
void set_environment(void) |
{ |
region_register rr; |
pta_register pta; |
87,13 → 87,6 |
} |
/* |
* Allocate VHPT and invalidate all its entries. |
*/ |
page_ht = (pte_t *) frame_alloc(VHPT_WIDTH - FRAME_WIDTH, FRAME_KA); |
memsetb((__address) page_ht, VHPT_SIZE, 0); |
ht_invalidate_all(); |
/* |
* Set up PTA register. |
*/ |
pta.word = pta_read(); |
100,7 → 93,7 |
pta.map.ve = 0; /* disable VHPT walker */ |
pta.map.vf = 1; /* large entry format */ |
pta.map.size = VHPT_WIDTH; |
pta.map.base = ((__address) page_ht) >> PTA_BASE_SHIFT; |
pta.map.base = VHPT_BASE >> PTA_BASE_SHIFT; |
pta_write(pta.word); |
srlz_i(); |
srlz_d(); |
113,14 → 106,14 |
* @param page Address of virtual page including VRN bits. |
* @param asid Address space identifier. |
* |
* @return Head of VHPT collision chain for page and asid. |
* @return VHPT entry address. |
*/ |
pte_t *vhpt_hash(__address page, asid_t asid) |
vhpt_entry_t *vhpt_hash(__address page, asid_t asid) |
{ |
region_register rr_save, rr; |
index_t vrn; |
rid_t rid; |
pte_t *t; |
vhpt_entry_t *v; |
vrn = page >> VRN_SHIFT; |
rid = ASID2RID(asid, vrn); |
130,8 → 123,8 |
/* |
* The RID is already in place, compute thash and return. |
*/ |
t = (pte_t *) thash(page); |
return t; |
v = (vhpt_entry_t *) thash(page); |
return v; |
} |
/* |
142,12 → 135,12 |
rr.map.rid = rid; |
rr_write(vrn, rr.word); |
srlz_i(); |
t = (pte_t *) thash(page); |
v = (vhpt_entry_t *) thash(page); |
rr_write(vrn, rr_save.word); |
srlz_i(); |
srlz_d(); |
return t; |
return v; |
} |
/** Compare ASID and VPN against PTE. |
159,7 → 152,7 |
* |
* @return True if page and asid match the page and asid of t, false otherwise. |
*/ |
bool vhpt_compare(__address page, asid_t asid, pte_t *t) |
bool vhpt_compare(__address page, asid_t asid, vhpt_entry_t *v) |
{ |
region_register rr_save, rr; |
index_t vrn; |
166,7 → 159,7 |
rid_t rid; |
bool match; |
ASSERT(t); |
ASSERT(v); |
vrn = page >> VRN_SHIFT; |
rid = ASID2RID(asid, vrn); |
176,7 → 169,7 |
/* |
* The RID is already in place, compare ttag with t and return. |
*/ |
return ttag(page) == t->present.tag.tag_word; |
return ttag(page) == v->present.tag.tag_word; |
} |
/* |
187,7 → 180,7 |
rr.map.rid = rid; |
rr_write(vrn, rr.word); |
srlz_i(); |
match = (ttag(page) == t->present.tag.tag_word); |
match = (ttag(page) == v->present.tag.tag_word); |
rr_write(vrn, rr_save.word); |
srlz_i(); |
srlz_d(); |
203,7 → 196,7 |
* @param frame Physical address of the frame to which page is mapped. |
* @param flags Different flags for the mapping. |
*/ |
void vhpt_set_record(pte_t *t, __address page, asid_t asid, __address frame, int flags) |
void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags) |
{ |
region_register rr_save, rr; |
index_t vrn; |
210,7 → 203,7 |
rid_t rid; |
__u64 tag; |
ASSERT(t); |
ASSERT(v); |
vrn = page >> VRN_SHIFT; |
rid = ASID2RID(asid, vrn); |
231,22 → 224,21 |
/* |
* Clear the entry. |
*/ |
t->word[0] = 0; |
t->word[1] = 0; |
t->word[2] = 0; |
t->word[3] = 0; |
v->word[0] = 0; |
v->word[1] = 0; |
v->word[2] = 0; |
v->word[3] = 0; |
t->present.p = true; |
t->present.ma = (flags & PAGE_CACHEABLE) ? MA_WRITEBACK : MA_UNCACHEABLE; |
t->present.a = false; /* not accessed */ |
t->present.d = false; /* not dirty */ |
t->present.pl = (flags & PAGE_USER) ? PL_USER : PL_KERNEL; |
t->present.ar = (flags & PAGE_WRITE) ? AR_WRITE : AR_READ; |
t->present.ar |= (flags & PAGE_EXEC) ? AR_EXECUTE : 0; |
t->present.ppn = frame >> PPN_SHIFT; |
t->present.ed = false; /* exception not deffered */ |
t->present.ps = PAGE_WIDTH; |
t->present.key = 0; |
t->present.tag.tag_word = tag; |
t->present.next = NULL; |
v->present.p = true; |
v->present.ma = (flags & PAGE_CACHEABLE) ? MA_WRITEBACK : MA_UNCACHEABLE; |
v->present.a = false; /* not accessed */ |
v->present.d = false; /* not dirty */ |
v->present.pl = (flags & PAGE_USER) ? PL_USER : PL_KERNEL; |
v->present.ar = (flags & PAGE_WRITE) ? AR_WRITE : AR_READ; |
v->present.ar |= (flags & PAGE_EXEC) ? AR_EXECUTE : 0; |
v->present.ppn = frame >> PPN_SHIFT; |
v->present.ed = false; /* exception not deffered */ |
v->present.ps = PAGE_WIDTH; |
v->present.key = 0; |
v->present.tag.tag_word = tag; |
} |