Subversion Repositories HelenOS

Rev

Rev 2082 | Rev 2462 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 2082 Rev 2089
1
/*
1
/*
2
 * Copyright (c) 2006 Jakub Jermar
2
 * Copyright (c) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup ia64mm 
29
/** @addtogroup ia64mm 
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
/*
35
/*
36
 * TLB management.
36
 * TLB management.
37
 */
37
 */
38
 
38
 
39
#include <mm/tlb.h>
39
#include <mm/tlb.h>
40
#include <mm/asid.h>
40
#include <mm/asid.h>
41
#include <mm/page.h>
41
#include <mm/page.h>
42
#include <mm/as.h>
42
#include <mm/as.h>
43
#include <arch/mm/tlb.h>
43
#include <arch/mm/tlb.h>
44
#include <arch/mm/page.h>
44
#include <arch/mm/page.h>
45
#include <arch/mm/vhpt.h>
45
#include <arch/mm/vhpt.h>
46
#include <arch/barrier.h>
46
#include <arch/barrier.h>
47
#include <arch/interrupt.h>
47
#include <arch/interrupt.h>
48
#include <arch/pal/pal.h>
48
#include <arch/pal/pal.h>
49
#include <arch/asm.h>
49
#include <arch/asm.h>
50
#include <typedefs.h>
-
 
51
#include <panic.h>
50
#include <panic.h>
52
#include <print.h>
51
#include <print.h>
53
#include <arch.h>
52
#include <arch.h>
54
#include <interrupt.h>
53
#include <interrupt.h>
55
 
54
 
56
/** Invalidate all TLB entries.
 *
 * Walks the purge loop described by PAL firmware (base address, two loop
 * counts and two strides) and issues ptc.e on each iteration, which flushes
 * the entire translation cache. Interrupts are disabled for the duration
 * of the loop.
 */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    uintptr_t adr;
    uint32_t count1, count2, stride1, stride2;
        
    int i, j;
        
    /*
     * PAL firmware supplies the address base, iteration counts and
     * strides the ptc.e loop must cover to flush the whole TLB.
     */
    adr = PAL_PTCE_INFO_BASE();
    count1 = PAL_PTCE_INFO_COUNT1();
    count2 = PAL_PTCE_INFO_COUNT2();
    stride1 = PAL_PTCE_INFO_STRIDE1();
    stride2 = PAL_PTCE_INFO_STRIDE2();
        
    ipl = interrupts_disable();

    for(i = 0; i < count1; i++) {
        for(j = 0; j < count2; j++) {
            asm volatile (
                "ptc.e %0 ;;"
                :
                : "r" (adr)
            );
            adr += stride2;
        }
        adr += stride1;
    }

    interrupts_restore(ipl);

    /* Serialize the data and instruction streams after the purge. */
    srlz_d();
    srlz_i();
#ifdef CONFIG_VHPT
    /* The VHPT mirrors TLB contents, so flush it as well. */
    vhpt_invalidate_all();
#endif  
}
93
 
92
 
94
/** Invalidate entries belonging to an address space.
 *
 * Implemented by flushing the entire TLB; no per-ASID purge is
 * performed here.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_invalidate_all();
}
102
 
101
 
103
 
102
 
104
/** Invalidate TLB entries mapping a range of pages of an address space.
 *
 * The purge granule is rounded up to the nearest supported power-of-four
 * page count and the starting address is aligned down to that granule, so
 * the purge may invalidate more entries than strictly requested.
 *
 * If the region register for the affected virtual region does not already
 * hold the RID derived from @a asid, it is temporarily switched and
 * restored afterwards.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entries are invalidated.
 * @param cnt Number of pages.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    region_register rr;
    bool restore_rr = false;
    int b = 0;
    int c = cnt;

    uintptr_t va;
    va = page;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }
    
    /* b becomes floor(log4(cnt)), selecting the purge-size bucket below. */
    while (c >>= 1)
        b++;
    b >>= 1;
    uint64_t ps;
    
    /*
     * All shifts below use a 64-bit one: ps can reach PAGE_WIDTH + 18,
     * for which a plain int shift (1 << ps) would overflow a 32-bit int
     * (undefined behavior) and produce a wrong alignment mask.
     */
    switch (b) {
    case 0: /* cnt 1 - 3 */
        ps = PAGE_WIDTH;
        break;
    case 1: /* cnt 4 - 15 */
        ps = PAGE_WIDTH + 2;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 2: /* cnt 16 - 63 */
        ps = PAGE_WIDTH + 4;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 3: /* cnt 64 - 255 */
        ps = PAGE_WIDTH + 6;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 4: /* cnt 256 - 1023 */
        ps = PAGE_WIDTH + 8;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 5: /* cnt 1024 - 4095 */
        ps = PAGE_WIDTH + 10;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 6: /* cnt 4096 - 16383 */
        ps = PAGE_WIDTH + 12;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 7: /* cnt 16384 - 65535 */
    case 8: /* cnt 65536 - (256K - 1) */
        ps = PAGE_WIDTH + 14;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    default:
        ps = PAGE_WIDTH + 18;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    }
    /* Purge one granule at a time; ptc.l takes the size as ps << 2. */
    for (; va < (page + cnt * PAGE_SIZE); va += ((uint64_t) 1 << ps)) {
        asm volatile (
            "ptc.l %0,%1;;"
            :
            : "r" (va), "r" (ps << 2)
        );
    }
    srlz_d();
    srlz_i();
    
    if (restore_rr) {
        /* Restore the original region register contents. */
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
197
 
196
 
198
/** Insert data into data translation cache.
 *
 * Thin wrapper around tc_mapping_insert() with the data-cache flag set.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);   /* true selects the data TC */
}
208
 
207
 
209
/** Insert data into instruction translation cache.
 *
 * Thin wrapper around tc_mapping_insert() with the data-cache flag clear.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);  /* false selects the instruction TC */
}
219
 
218
 
220
/** Insert data into instruction or data translation cache.
 *
 * If the region register for the affected virtual region does not hold the
 * RID derived from @a asid, it is temporarily replaced and restored after
 * the insertion.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }
    
    /*
     * Insert the entry with PSR.ic temporarily cleared (saved in r8 and
     * restored afterwards); predicates p6/p7 pick itc.i vs. itc.d based
     * on the dtc flag.
     */
    asm volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"            /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"       /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );
    
    if (restore_rr) {
        /* Restore the original region register contents. */
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
270
 
269
 
271
/** Insert data into instruction translation register.
 *
 * Thin wrapper around tr_mapping_insert() with the data-register flag clear.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);  /* false selects the instruction TR */
}
282
 
281
 
283
/** Insert data into data translation register.
 *
 * Thin wrapper around tr_mapping_insert() with the data-register flag set.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);   /* true selects the data TR */
}
294
 
293
 
295
/** Insert data into instruction or data translation register.
 *
 * If the region register for the affected virtual region does not hold the
 * RID derived from @a asid, it is temporarily replaced and restored after
 * the insertion.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    /*
     * Insert the entry into translation register slot tr with PSR.ic
     * temporarily cleared (saved in r8 and restored afterwards);
     * predicates p6/p7 pick itr.i vs. itr.d based on the dtr flag.
     */
    asm volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"            /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */         
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );
    
    if (restore_rr) {
        /* Restore the original region register contents. */
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
346
 
345
 
347
/** Insert data into DTLB.
 *
 * Builds a writable, write-back, kernel-privilege TLB entry mapping
 * @a page to @a frame and inserts it either into a data translation
 * register or the data translation cache.
 *
 * @param page Virtual page address including VRN bits.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
{
    tlb_entry_t entry;
    
    entry.word[0] = 0;
    entry.word[1] = 0;
    
    entry.p = true;         /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;         /* already accessed */
    entry.d = true;         /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}
375
 
374
 
376
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, count_t width)
{
    /* ptr.d takes the purge size shifted left by 2 in its second operand. */
    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
}
387
 
386
 
388
 
387
 
389
/** Copy content of PTE into data translation cache.
 *
 * Translates the software PTE fields into the hardware TLB entry format
 * and inserts the result into the data translation cache (and the VHPT,
 * when configured).
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;
    
    entry.p = t->p;                                 /* present bit */
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;    /* memory attribute from cacheable bit */
    entry.a = t->a;                                 /* accessed bit */
    entry.d = t->d;                                 /* dirty bit */
    entry.pl = t->k ? PL_KERNEL : PL_USER;          /* privilege level */
    entry.ar = t->w ? AR_WRITE : AR_READ;           /* access rights */
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif  
}
414
 
413
 
415
/** Copy content of PTE into instruction translation cache.
 *
 * Translates the software PTE fields into the hardware TLB entry format
 * and inserts the result into the instruction translation cache (and the
 * VHPT, when configured). The PTE must be executable.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;
    
    /* Only executable pages may enter the instruction TC. */
    ASSERT(t->x);
    
    entry.p = t->p;                                 /* present bit */
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;    /* memory attribute from cacheable bit */
    entry.a = t->a;                                 /* accessed bit */
    entry.pl = t->k ? PL_KERNEL : PL_USER;          /* privilege level */
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ; /* access rights */
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;
    
    itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
    vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif  
}
441
 
440
 
442
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * Looks up the faulting address in the software page hash table; on a hit
 * the mapping is copied into the instruction translation cache, otherwise
 * the fault is forwarded to the address space page fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;
    
    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in software page hash table.
         * Insert it into the instruction translation cache.
         */
        itc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate,"Page fault at %p",va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
        }
    }
}
478
 
477
 
479
/** Data TLB fault handler for faults with VHPT turned off.
478
/** Data TLB fault handler for faults with VHPT turned off.
480
 *
479
 *
481
 * @param vector Interruption vector.
480
 * @param vector Interruption vector.
482
 * @param istate Structure with saved interruption state.
481
 * @param istate Structure with saved interruption state.
483
 */
482
 */
484
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
483
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
485
{
484
{
486
    region_register rr;
485
    region_register rr;
487
    rid_t rid;
486
    rid_t rid;
488
    uintptr_t va;
487
    uintptr_t va;
489
    pte_t *t;
488
    pte_t *t;
490
   
489
   
491
    va = istate->cr_ifa;    /* faulting address */
490
    va = istate->cr_ifa;    /* faulting address */
492
    rr.word = rr_read(VA2VRN(va));
491
    rr.word = rr_read(VA2VRN(va));
493
    rid = rr.map.rid;
492
    rid = rr.map.rid;
494
    if (RID2ASID(rid) == ASID_KERNEL) {
493
    if (RID2ASID(rid) == ASID_KERNEL) {
495
        if (VA2VRN(va) == VRN_KERNEL) {
494
        if (VA2VRN(va) == VRN_KERNEL) {
496
            /*
495
            /*
497
             * Provide KA2PA(identity) mapping for faulting piece of
496
             * Provide KA2PA(identity) mapping for faulting piece of
498
             * kernel address space.
497
             * kernel address space.
499
             */
498
             */
500
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
499
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
501
            return;
500
            return;
502
        }
501
        }
503
    }
502
    }
504
 
503
 
505
    page_table_lock(AS, true);
504
    page_table_lock(AS, true);
506
    t = page_mapping_find(AS, va);
505
    t = page_mapping_find(AS, va);
507
    if (t) {
506
    if (t) {
508
        /*
507
        /*
509
         * The mapping was found in the software page hash table.
508
         * The mapping was found in the software page hash table.
510
         * Insert it into data translation cache.
509
         * Insert it into data translation cache.
511
         */
510
         */
512
        dtc_pte_copy(t);
511
        dtc_pte_copy(t);
513
        page_table_unlock(AS, true);
512
        page_table_unlock(AS, true);
514
    } else {
513
    } else {
515
        /*
514
        /*
516
         * Forward the page fault to the address space page fault handler.
515
         * Forward the page fault to the address space page fault handler.
517
         */
516
         */
518
        page_table_unlock(AS, true);
517
        page_table_unlock(AS, true);
519
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
518
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
520
            fault_if_from_uspace(istate,"Page fault at %p",va);
519
            fault_if_from_uspace(istate,"Page fault at %p",va);
521
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
520
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
522
        }
521
        }
523
    }
522
    }
524
}
523
}
525
 
524
 
526
/** Data nested TLB fault handler.
525
/** Data nested TLB fault handler.
527
 *
526
 *
528
 * This fault should not occur.
527
 * This fault should not occur.
529
 *
528
 *
530
 * @param vector Interruption vector.
529
 * @param vector Interruption vector.
531
 * @param istate Structure with saved interruption state.
530
 * @param istate Structure with saved interruption state.
532
 */
531
 */
533
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
532
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
534
{
533
{
535
    panic("%s\n", __FUNCTION__);
534
    panic("%s\n", __FUNCTION__);
536
}
535
}
537
 
536
 
538
/** Data Dirty bit fault handler.
537
/** Data Dirty bit fault handler.
539
 *
538
 *
540
 * @param vector Interruption vector.
539
 * @param vector Interruption vector.
541
 * @param istate Structure with saved interruption state.
540
 * @param istate Structure with saved interruption state.
542
 */
541
 */
543
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
542
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
544
{
543
{
545
    region_register rr;
544
    region_register rr;
546
    rid_t rid;
545
    rid_t rid;
547
    uintptr_t va;
546
    uintptr_t va;
548
    pte_t *t;
547
    pte_t *t;
549
   
548
   
550
    va = istate->cr_ifa;    /* faulting address */
549
    va = istate->cr_ifa;    /* faulting address */
551
    rr.word = rr_read(VA2VRN(va));
550
    rr.word = rr_read(VA2VRN(va));
552
    rid = rr.map.rid;
551
    rid = rr.map.rid;
553
 
552
 
554
    page_table_lock(AS, true);
553
    page_table_lock(AS, true);
555
    t = page_mapping_find(AS, va);
554
    t = page_mapping_find(AS, va);
556
    ASSERT(t && t->p);
555
    ASSERT(t && t->p);
557
    if (t && t->p && t->w) {
556
    if (t && t->p && t->w) {
558
        /*
557
        /*
559
         * Update the Dirty bit in page tables and reinsert
558
         * Update the Dirty bit in page tables and reinsert
560
         * the mapping into DTC.
559
         * the mapping into DTC.
561
         */
560
         */
562
        t->d = true;
561
        t->d = true;
563
        dtc_pte_copy(t);
562
        dtc_pte_copy(t);
564
    } else {
563
    } else {
565
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
564
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
566
            fault_if_from_uspace(istate,"Page fault at %p",va);
565
            fault_if_from_uspace(istate,"Page fault at %p",va);
567
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
566
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
568
            t->d = true;
567
            t->d = true;
569
            dtc_pte_copy(t);
568
            dtc_pte_copy(t);
570
        }
569
        }
571
    }
570
    }
572
    page_table_unlock(AS, true);
571
    page_table_unlock(AS, true);
573
}
572
}
574
 
573
 
575
/** Instruction access bit fault handler.
574
/** Instruction access bit fault handler.
576
 *
575
 *
577
 * @param vector Interruption vector.
576
 * @param vector Interruption vector.
578
 * @param istate Structure with saved interruption state.
577
 * @param istate Structure with saved interruption state.
579
 */
578
 */
580
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
579
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
581
{
580
{
582
    region_register rr;
581
    region_register rr;
583
    rid_t rid;
582
    rid_t rid;
584
    uintptr_t va;
583
    uintptr_t va;
585
    pte_t *t;  
584
    pte_t *t;  
586
 
585
 
587
    va = istate->cr_ifa;    /* faulting address */
586
    va = istate->cr_ifa;    /* faulting address */
588
    rr.word = rr_read(VA2VRN(va));
587
    rr.word = rr_read(VA2VRN(va));
589
    rid = rr.map.rid;
588
    rid = rr.map.rid;
590
 
589
 
591
    page_table_lock(AS, true);
590
    page_table_lock(AS, true);
592
    t = page_mapping_find(AS, va);
591
    t = page_mapping_find(AS, va);
593
    ASSERT(t && t->p);
592
    ASSERT(t && t->p);
594
    if (t && t->p && t->x) {
593
    if (t && t->p && t->x) {
595
        /*
594
        /*
596
         * Update the Accessed bit in page tables and reinsert
595
         * Update the Accessed bit in page tables and reinsert
597
         * the mapping into ITC.
596
         * the mapping into ITC.
598
         */
597
         */
599
        t->a = true;
598
        t->a = true;
600
        itc_pte_copy(t);
599
        itc_pte_copy(t);
601
    } else {
600
    } else {
602
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
601
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
603
            fault_if_from_uspace(istate,"Page fault at %p",va);
602
            fault_if_from_uspace(istate,"Page fault at %p",va);
604
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
603
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
605
            t->a = true;
604
            t->a = true;
606
            itc_pte_copy(t);
605
            itc_pte_copy(t);
607
        }
606
        }
608
    }
607
    }
609
    page_table_unlock(AS, true);
608
    page_table_unlock(AS, true);
610
}
609
}
611
 
610
 
612
/** Data access bit fault handler.
611
/** Data access bit fault handler.
613
 *
612
 *
614
 * @param vector Interruption vector.
613
 * @param vector Interruption vector.
615
 * @param istate Structure with saved interruption state.
614
 * @param istate Structure with saved interruption state.
616
 */
615
 */
617
void data_access_bit_fault(uint64_t vector, istate_t *istate)
616
void data_access_bit_fault(uint64_t vector, istate_t *istate)
618
{
617
{
619
    region_register rr;
618
    region_register rr;
620
    rid_t rid;
619
    rid_t rid;
621
    uintptr_t va;
620
    uintptr_t va;
622
    pte_t *t;
621
    pte_t *t;
623
 
622
 
624
    va = istate->cr_ifa;    /* faulting address */
623
    va = istate->cr_ifa;    /* faulting address */
625
    rr.word = rr_read(VA2VRN(va));
624
    rr.word = rr_read(VA2VRN(va));
626
    rid = rr.map.rid;
625
    rid = rr.map.rid;
627
 
626
 
628
    page_table_lock(AS, true);
627
    page_table_lock(AS, true);
629
    t = page_mapping_find(AS, va);
628
    t = page_mapping_find(AS, va);
630
    ASSERT(t && t->p);
629
    ASSERT(t && t->p);
631
    if (t && t->p) {
630
    if (t && t->p) {
632
        /*
631
        /*
633
         * Update the Accessed bit in page tables and reinsert
632
         * Update the Accessed bit in page tables and reinsert
634
         * the mapping into DTC.
633
         * the mapping into DTC.
635
         */
634
         */
636
        t->a = true;
635
        t->a = true;
637
        dtc_pte_copy(t);
636
        dtc_pte_copy(t);
638
    } else {
637
    } else {
639
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
638
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
640
            fault_if_from_uspace(istate,"Page fault at %p",va);
639
            fault_if_from_uspace(istate,"Page fault at %p",va);
641
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
640
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
642
            t->a = true;
641
            t->a = true;
643
            itc_pte_copy(t);
642
            itc_pte_copy(t);
644
        }
643
        }
645
    }
644
    }
646
    page_table_unlock(AS, true);
645
    page_table_unlock(AS, true);
647
}
646
}
648
 
647
 
649
/** Page not present fault handler.
648
/** Page not present fault handler.
650
 *
649
 *
651
 * @param vector Interruption vector.
650
 * @param vector Interruption vector.
652
 * @param istate Structure with saved interruption state.
651
 * @param istate Structure with saved interruption state.
653
 */
652
 */
654
void page_not_present(uint64_t vector, istate_t *istate)
653
void page_not_present(uint64_t vector, istate_t *istate)
655
{
654
{
656
    region_register rr;
655
    region_register rr;
657
    rid_t rid;
656
    rid_t rid;
658
    uintptr_t va;
657
    uintptr_t va;
659
    pte_t *t;
658
    pte_t *t;
660
   
659
   
661
    va = istate->cr_ifa;    /* faulting address */
660
    va = istate->cr_ifa;    /* faulting address */
662
    rr.word = rr_read(VA2VRN(va));
661
    rr.word = rr_read(VA2VRN(va));
663
    rid = rr.map.rid;
662
    rid = rr.map.rid;
664
 
663
 
665
    page_table_lock(AS, true);
664
    page_table_lock(AS, true);
666
    t = page_mapping_find(AS, va);
665
    t = page_mapping_find(AS, va);
667
    ASSERT(t);
666
    ASSERT(t);
668
   
667
   
669
    if (t->p) {
668
    if (t->p) {
670
        /*
669
        /*
671
         * If the Present bit is set in page hash table, just copy it
670
         * If the Present bit is set in page hash table, just copy it
672
         * and update ITC/DTC.
671
         * and update ITC/DTC.
673
         */
672
         */
674
        if (t->x)
673
        if (t->x)
675
            itc_pte_copy(t);
674
            itc_pte_copy(t);
676
        else
675
        else
677
            dtc_pte_copy(t);
676
            dtc_pte_copy(t);
678
        page_table_unlock(AS, true);
677
        page_table_unlock(AS, true);
679
    } else {
678
    } else {
680
        page_table_unlock(AS, true);
679
        page_table_unlock(AS, true);
681
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
680
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
682
            fault_if_from_uspace(istate,"Page fault at %p",va);
681
            fault_if_from_uspace(istate,"Page fault at %p",va);
683
            panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
682
            panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
684
        }
683
        }
685
    }
684
    }
686
}
685
}
687
 
686
 
688
/** @}
687
/** @}
689
 */
688
 */
690
 
689