Subversion Repositories HelenOS-historic


Rev 699 → Rev 746

Line 32 (Rev 699) ... Line 32 (Rev 746)
 #include <mm/heap.h>
 #include <arch/mm/asid.h>
 #include <arch/types.h>
 #include <typedefs.h>
 #include <arch/asm.h>
+#include <synch/spinlock.h>
+#include <arch.h>
 #include <debug.h>
 
+/**
+ * This lock protects the page hash table. Note that software must
+ * be still careful about ordering of writes to ensure consistent
+ * view of the page hash table for hardware helpers such as VHPT
+ * walker on ia64.
+ */
+SPINLOCK_INITIALIZE(page_ht_lock);
+
+/**
+ * Page hash table pointer.
+ * The page hash table may be accessed only when page_ht_lock is held.
+ */
+pte_t *page_ht = NULL;
+
 static void ht_mapping_insert(__address page, asid_t asid, __address frame, int flags, __address root);
 static pte_t *ht_mapping_find(__address page, asid_t asid, __address root);
 
 page_operations_t page_ht_operations = {
     .mapping_insert = ht_mapping_insert,
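
The comment that accompanies page_ht_lock above makes a point worth spelling out: holding the lock is not enough, because hardware helpers such as the ia64 VHPT walker read the table without taking it, so stores must be ordered so that an entry only becomes reachable once it is fully written. The standalone sketch below illustrates that discipline; the entry layout, write_barrier() helper and chain_append() function are illustrative assumptions, not the actual HelenOS pte_t or HT_* macros.

#include <stddef.h>

/* Illustrative entry layout only; the real pte_t and the HT_* helpers
 * are defined in architecture-specific headers and may differ. */
typedef struct ht_entry {
    unsigned long page;         /* virtual page */
    unsigned long asid;         /* address space identifier */
    unsigned long frame;        /* physical frame */
    int flags;
    struct ht_entry *next;      /* collision chain link */
} ht_entry_t;

/* Stand-in for an architecture-specific write barrier (assumption). */
static inline void write_barrier(void)
{
    __sync_synchronize();
}

/* Publish a fully initialized record at the end of a collision chain, so
 * that a concurrent hardware walker observes either the old chain or the
 * complete new entry, never a half-written one. */
static void chain_append(ht_entry_t *tail, ht_entry_t *new_entry,
                         unsigned long page, unsigned long asid,
                         unsigned long frame, int flags)
{
    /* 1. Fill in every field of the new entry first. */
    new_entry->page = page;
    new_entry->asid = asid;
    new_entry->frame = frame;
    new_entry->flags = flags;
    new_entry->next = NULL;

    /* 2. Order the stores above before the store that links the entry. */
    write_barrier();

    /* 3. Only now make the entry reachable from the chain. */
    tail->next = new_entry;
}
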
Line 45 (Rev 699) ... Line 61 (Rev 746)

 };
 
 /** Map page to frame using page hash table.
  *
  * Map virtual address 'page' to physical address 'frame'
- * using 'flags'.
+ * using 'flags'. In order not to disturb hardware searching,
+ * new mappings are appended to the end of the collision
+ * chain.
  *
  * @param page Virtual address of the page to be mapped.
  * @param asid Address space to which page belongs.
  * @param frame Physical address of memory frame to which the mapping is done.
  * @param flags Flags to be used for mapping.
  * @param root Ignored.
  */
 void ht_mapping_insert(__address page, asid_t asid, __address frame, int flags, __address root)
 {
-    pte_t *t, *u = NULL;
+    pte_t *t, *u;
+    ipl_t ipl;
+    
+    ipl = interrupts_disable();
+    spinlock_lock(&page_ht_lock);
     
     t = HT_HASH(page, asid);
     if (!HT_SLOT_EMPTY(t)) {
-        u = (pte_t *) malloc(sizeof(pte_t));    /* FIXME: use slab allocator for this */
-        if (!u)
-            panic("could not allocate memory for hash table\n");
-        *u = *t;
+        /*
+         * The slot is occupied.
+         * Walk through the collision chain and append the mapping to its end.
+         */
+        do {
+            u = t;
+            if (HT_COMPARE(page, asid, t)) {
+                /*
+                 * Nothing to do,
+                 * the record is already there.
+                 */
+                spinlock_unlock(&page_ht_lock);
+                interrupts_restore(ipl);
+                return;
+            }
+        } while ((t = HT_GET_NEXT(t)));
+
+        t = (pte_t *) malloc(sizeof(pte_t));    /* FIXME: use slab allocator for this */
+        if (!t)
+            panic("could not allocate memory\n");
+
+        HT_SET_NEXT(u, t);
     }
-    HT_SET_NEXT(t, u);
+
     HT_SET_RECORD(t, page, asid, frame, flags);
+    HT_SET_NEXT(t, NULL);
+
+    spinlock_unlock(&page_ht_lock);
+    interrupts_restore(ipl);
 }
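
As the doc comment added in this hunk notes, the new ht_mapping_insert() walks the collision chain before allocating anything: if the (page, asid) pair is already recorded it simply returns, otherwise it remembers the last node so the fresh entry can be linked after it, keeping the chain order stable for hardware searching. A simplified version of that walk, reusing the illustrative ht_entry_t from the sketch above (again an assumption, not the HT_COMPARE/HT_GET_NEXT macros), might look like this:

/* Search the chain rooted at 'slot' for an existing (page, asid) record.
 * Returns the record if found; otherwise returns NULL and sets *tail to
 * the last node, after which the caller appends the new mapping. */
static ht_entry_t *find_or_tail(ht_entry_t *slot, unsigned long page,
                                unsigned long asid, ht_entry_t **tail)
{
    ht_entry_t *t = slot;

    do {
        *tail = t;                      /* remember the last visited node */
        if (t->page == page && t->asid == asid)
            return t;                   /* already mapped, nothing to do */
    } while ((t = t->next));

    return NULL;                        /* caller appends after *tail */
}
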
 
 /** Find mapping for virtual page in page hash table.
  *
  * Find mapping for virtual page.
  *
+ * Interrupts must be disabled.
+ *
  * @param page Virtual page.
  * @param asid Address space to which page belongs.
  * @param root Ignored.
  *
  * @return NULL if there is no such mapping; requested mapping otherwise.
  */
 pte_t *ht_mapping_find(__address page, asid_t asid, __address root)
 {
     pte_t *t;
     
+    spinlock_lock(&page_ht_lock);
     t = HT_HASH(page, asid);
-    while (!HT_COMPARE(page, asid, t) && HT_GET_NEXT(t))
-        t = HT_GET_NEXT(t);
+    if (!HT_SLOT_EMPTY(t)) {
+        while (!HT_COMPARE(page, asid, t) && HT_GET_NEXT(t))
+            t = HT_GET_NEXT(t);
+        t = HT_COMPARE(page, asid, t) ? t : NULL;
+    } else {
+        t = NULL;
+    }
+    spinlock_unlock(&page_ht_lock);
+    return t;
+}
+
+/** Invalidate page hash table.
+ *
+ * Interrupts must be disabled.
+ */
+void ht_invalidate_all(void)
+{
+    pte_t *t, *u;
+    int i;
     
-    return HT_COMPARE(page, asid, t) ? t : NULL;
+    spinlock_lock(&page_ht_lock);
+    for (i = 0; i < HT_ENTRIES; i++) {
+        if (!HT_SLOT_EMPTY(&page_ht[i])) {
+            t = HT_GET_NEXT(&page_ht[i]);
+            while (t) {
+                u = t;
+                t = HT_GET_NEXT(t);
+                free(u);        /* FIXME: use slab allocator for this */
+            }
+            HT_INVALIDATE_SLOT(&page_ht[i]);
+        }
+    }
+    spinlock_unlock(&page_ht_lock);
 }
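
Both ht_mapping_find() and ht_invalidate_all() take page_ht_lock themselves but document that interrupts must already be disabled by the caller; only ht_mapping_insert() disables them on its own. A hypothetical caller, not taken from the repository (and necessarily living in this file, since ht_mapping_find() is static), could look like:

/* Hypothetical caller (illustration only): disable interrupts around the
 * lookup, as the comment on ht_mapping_find() requires, and restore the
 * previous interrupt priority level afterwards. */
static void example_lookup(__address page, asid_t asid)
{
    ipl_t ipl;
    pte_t *pte;

    ipl = interrupts_disable();         /* required by ht_mapping_find() */
    pte = ht_mapping_find(page, asid, 0);
    if (pte) {
        /* the mapping exists */
    }
    interrupts_restore(ipl);            /* restore previous state */
}

Whether this is the intended calling convention for other subsystems is not shown in this revision; the sketch only mirrors the documented interrupt requirement.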