Subversion Repositories HelenOS-historic

Rev

Rev 1735 | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 1735 Rev 1780
1
/*
1
/*
2
 * Copyright (C) 2006 Jakub Jermar
2
 * Copyright (C) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
 /** @addtogroup ia64mm
29
 /** @addtogroup ia64mm
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
/*
35
/*
36
 * TLB management.
36
 * TLB management.
37
 */
37
 */
38
 
38
 
39
#include <mm/tlb.h>
39
#include <mm/tlb.h>
40
#include <mm/asid.h>
40
#include <mm/asid.h>
41
#include <mm/page.h>
41
#include <mm/page.h>
42
#include <mm/as.h>
42
#include <mm/as.h>
43
#include <arch/mm/tlb.h>
43
#include <arch/mm/tlb.h>
44
#include <arch/mm/page.h>
44
#include <arch/mm/page.h>
45
#include <arch/mm/vhpt.h>
45
#include <arch/mm/vhpt.h>
46
#include <arch/barrier.h>
46
#include <arch/barrier.h>
47
#include <arch/interrupt.h>
47
#include <arch/interrupt.h>
48
#include <arch/pal/pal.h>
48
#include <arch/pal/pal.h>
49
#include <arch/asm.h>
49
#include <arch/asm.h>
50
#include <typedefs.h>
50
#include <typedefs.h>
51
#include <panic.h>
51
#include <panic.h>
52
#include <print.h>
52
#include <print.h>
53
#include <arch.h>
53
#include <arch.h>
54
#include <interrupt.h>
54
#include <interrupt.h>
55
 
55
 
56
/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    uintptr_t adr;
    uint32_t count1, count2, stride1, stride2;
    
    int i,j;
    
    /*
     * Query PAL for the platform-specific parameters of the ptc.e purge
     * loop: the starting address plus the trip counts and strides of the
     * two nested loops needed to purge the entire local TLB.
     */
    adr = PAL_PTCE_INFO_BASE();
    count1 = PAL_PTCE_INFO_COUNT1();
    count2 = PAL_PTCE_INFO_COUNT2();
    stride1 = PAL_PTCE_INFO_STRIDE1();
    stride2 = PAL_PTCE_INFO_STRIDE2();
    
    /* The purge sequence must not be interleaved with other TLB activity. */
    ipl = interrupts_disable();

    for(i = 0; i < count1; i++) {
        for(j = 0; j < count2; j++) {
            __asm__ volatile (
                "ptc.e %0 ;;"
                :
                : "r" (adr)
            );
            /* Address accumulates: inner loop advances by stride2... */
            adr += stride2;
        }
        /* ...and the outer loop adds stride1 on top of that. */
        adr += stride1;
    }

    interrupts_restore(ipl);

    /* Serialize the data and instruction streams so the purge takes effect. */
    srlz_d();
    srlz_i();
#ifdef CONFIG_VHPT
    vhpt_invalidate_all();
#endif  
}
93
 
93
 
94
/** Invalidate entries belonging to an address space.
94
/** Invalidate entries belonging to an address space.
95
 *
95
 *
96
 * @param asid Address space identifier.
96
 * @param asid Address space identifier.
97
 */
97
 */
98
void tlb_invalidate_asid(asid_t asid)
98
void tlb_invalidate_asid(asid_t asid)
99
{
99
{
100
    tlb_invalidate_all();
100
    tlb_invalidate_all();
101
}
101
}
102
 
102
 
103
 
103
 
104
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
104
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
105
{
105
{
106
    region_register rr;
106
    region_register rr;
107
    bool restore_rr = false;
107
    bool restore_rr = false;
108
    int b = 0;
108
    int b = 0;
109
    int c = cnt;
109
    int c = cnt;
110
 
110
 
111
    __address va;
111
    uintptr_t va;
112
    va = page;
112
    va = page;
113
 
113
 
114
    rr.word = rr_read(VA2VRN(va));
114
    rr.word = rr_read(VA2VRN(va));
115
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
115
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
116
        /*
116
        /*
117
         * The selected region register does not contain required RID.
117
         * The selected region register does not contain required RID.
118
         * Save the old content of the register and replace the RID.
118
         * Save the old content of the register and replace the RID.
119
         */
119
         */
120
        region_register rr0;
120
        region_register rr0;
121
 
121
 
122
        rr0 = rr;
122
        rr0 = rr;
123
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
123
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
124
        rr_write(VA2VRN(va), rr0.word);
124
        rr_write(VA2VRN(va), rr0.word);
125
        srlz_d();
125
        srlz_d();
126
        srlz_i();
126
        srlz_i();
127
    }
127
    }
128
   
128
   
129
    while(c >>= 1)
129
    while(c >>= 1)
130
        b++;
130
        b++;
131
    b >>= 1;
131
    b >>= 1;
132
    __u64 ps;
132
    uint64_t ps;
133
   
133
   
134
    switch (b) {
134
    switch (b) {
135
        case 0: /*cnt 1-3*/
135
        case 0: /*cnt 1-3*/
136
            ps = PAGE_WIDTH;
136
            ps = PAGE_WIDTH;
137
            break;
137
            break;
138
        case 1: /*cnt 4-15*/
138
        case 1: /*cnt 4-15*/
139
            /*cnt=((cnt-1)/4)+1;*/
139
            /*cnt=((cnt-1)/4)+1;*/
140
            ps = PAGE_WIDTH+2;
140
            ps = PAGE_WIDTH+2;
141
            va &= ~((1<<ps)-1);
141
            va &= ~((1<<ps)-1);
142
            break;
142
            break;
143
        case 2: /*cnt 16-63*/
143
        case 2: /*cnt 16-63*/
144
            /*cnt=((cnt-1)/16)+1;*/
144
            /*cnt=((cnt-1)/16)+1;*/
145
            ps = PAGE_WIDTH+4;
145
            ps = PAGE_WIDTH+4;
146
            va &= ~((1<<ps)-1);
146
            va &= ~((1<<ps)-1);
147
            break;
147
            break;
148
        case 3: /*cnt 64-255*/
148
        case 3: /*cnt 64-255*/
149
            /*cnt=((cnt-1)/64)+1;*/
149
            /*cnt=((cnt-1)/64)+1;*/
150
            ps = PAGE_WIDTH+6;
150
            ps = PAGE_WIDTH+6;
151
            va &= ~((1<<ps)-1);
151
            va &= ~((1<<ps)-1);
152
            break;
152
            break;
153
        case 4: /*cnt 256-1023*/
153
        case 4: /*cnt 256-1023*/
154
            /*cnt=((cnt-1)/256)+1;*/
154
            /*cnt=((cnt-1)/256)+1;*/
155
            ps = PAGE_WIDTH+8;
155
            ps = PAGE_WIDTH+8;
156
            va &= ~((1<<ps)-1);
156
            va &= ~((1<<ps)-1);
157
            break;
157
            break;
158
        case 5: /*cnt 1024-4095*/
158
        case 5: /*cnt 1024-4095*/
159
            /*cnt=((cnt-1)/1024)+1;*/
159
            /*cnt=((cnt-1)/1024)+1;*/
160
            ps = PAGE_WIDTH+10;
160
            ps = PAGE_WIDTH+10;
161
            va &= ~((1<<ps)-1);
161
            va &= ~((1<<ps)-1);
162
            break;
162
            break;
163
        case 6: /*cnt 4096-16383*/
163
        case 6: /*cnt 4096-16383*/
164
            /*cnt=((cnt-1)/4096)+1;*/
164
            /*cnt=((cnt-1)/4096)+1;*/
165
            ps = PAGE_WIDTH+12;
165
            ps = PAGE_WIDTH+12;
166
            va &= ~((1<<ps)-1);
166
            va &= ~((1<<ps)-1);
167
            break;
167
            break;
168
        case 7: /*cnt 16384-65535*/
168
        case 7: /*cnt 16384-65535*/
169
        case 8: /*cnt 65536-(256K-1)*/
169
        case 8: /*cnt 65536-(256K-1)*/
170
            /*cnt=((cnt-1)/16384)+1;*/
170
            /*cnt=((cnt-1)/16384)+1;*/
171
            ps = PAGE_WIDTH+14;
171
            ps = PAGE_WIDTH+14;
172
            va &= ~((1<<ps)-1);
172
            va &= ~((1<<ps)-1);
173
            break;
173
            break;
174
        default:
174
        default:
175
            /*cnt=((cnt-1)/(16384*16))+1;*/
175
            /*cnt=((cnt-1)/(16384*16))+1;*/
176
            ps=PAGE_WIDTH+18;
176
            ps=PAGE_WIDTH+18;
177
            va&=~((1<<ps)-1);
177
            va&=~((1<<ps)-1);
178
            break;
178
            break;
179
    }
179
    }
180
    /*cnt+=(page!=va);*/
180
    /*cnt+=(page!=va);*/
181
    for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
181
    for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
182
        __asm__ volatile (
182
        __asm__ volatile (
183
            "ptc.l %0,%1;;"
183
            "ptc.l %0,%1;;"
184
            :
184
            :
185
            : "r" (va), "r" (ps<<2)
185
            : "r" (va), "r" (ps<<2)
186
        );
186
        );
187
    }
187
    }
188
    srlz_d();
188
    srlz_d();
189
    srlz_i();
189
    srlz_i();
190
   
190
   
191
    if (restore_rr) {
191
    if (restore_rr) {
192
        rr_write(VA2VRN(va), rr.word);
192
        rr_write(VA2VRN(va), rr.word);
193
        srlz_d();
193
        srlz_d();
194
        srlz_i();
194
        srlz_i();
195
    }
195
    }
196
}
196
}
197
 
197
 
198
/** Insert data into data translation cache.
198
/** Insert data into data translation cache.
199
 *
199
 *
200
 * @param va Virtual page address.
200
 * @param va Virtual page address.
201
 * @param asid Address space identifier.
201
 * @param asid Address space identifier.
202
 * @param entry The rest of TLB entry as required by TLB insertion format.
202
 * @param entry The rest of TLB entry as required by TLB insertion format.
203
 */
203
 */
204
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
204
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
205
{
205
{
206
    tc_mapping_insert(va, asid, entry, true);
206
    tc_mapping_insert(va, asid, entry, true);
207
}
207
}
208
 
208
 
209
/** Insert data into instruction translation cache.
209
/** Insert data into instruction translation cache.
210
 *
210
 *
211
 * @param va Virtual page address.
211
 * @param va Virtual page address.
212
 * @param asid Address space identifier.
212
 * @param asid Address space identifier.
213
 * @param entry The rest of TLB entry as required by TLB insertion format.
213
 * @param entry The rest of TLB entry as required by TLB insertion format.
214
 */
214
 */
215
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
215
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
216
{
216
{
217
    tc_mapping_insert(va, asid, entry, false);
217
    tc_mapping_insert(va, asid, entry, false);
218
}
218
}
219
 
219
 
220
/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }
    
    /*
     * The insertion sequence runs with the PSR.ic bit cleared
     * (rsm PSR_IC_MASK); the previous PSR is kept in r8 and restored
     * via psr.l afterwards. Predicates p6/p7 select itc.i vs. itc.d
     * according to the dtc flag (p6 fires when dtc == 0).
     */
    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"            /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"       /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );
    
    if (restore_rr) {
        /* Put back the region register contents saved above. */
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
270
 
270
 
271
/** Insert data into instruction translation register.
271
/** Insert data into instruction translation register.
272
 *
272
 *
273
 * @param va Virtual page address.
273
 * @param va Virtual page address.
274
 * @param asid Address space identifier.
274
 * @param asid Address space identifier.
275
 * @param entry The rest of TLB entry as required by TLB insertion format.
275
 * @param entry The rest of TLB entry as required by TLB insertion format.
276
 * @param tr Translation register.
276
 * @param tr Translation register.
277
 */
277
 */
278
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
278
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
279
{
279
{
280
    tr_mapping_insert(va, asid, entry, false, tr);
280
    tr_mapping_insert(va, asid, entry, false, tr);
281
}
281
}
282
 
282
 
283
/** Insert data into data translation register.
283
/** Insert data into data translation register.
284
 *
284
 *
285
 * @param va Virtual page address.
285
 * @param va Virtual page address.
286
 * @param asid Address space identifier.
286
 * @param asid Address space identifier.
287
 * @param entry The rest of TLB entry as required by TLB insertion format.
287
 * @param entry The rest of TLB entry as required by TLB insertion format.
288
 * @param tr Translation register.
288
 * @param tr Translation register.
289
 */
289
 */
290
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
290
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
291
{
291
{
292
    tr_mapping_insert(va, asid, entry, true, tr);
292
    tr_mapping_insert(va, asid, entry, true, tr);
293
}
293
}
294
 
294
 
295
/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    /*
     * The insertion runs with PSR.ic cleared (rsm PSR_IC_MASK); the
     * previous PSR is saved in r8 and restored via psr.l. Predicates
     * p6/p7 select itr.i vs. itr.d according to the dtr flag, with tr
     * indexing the translation register file.
     */
    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"            /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */         
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );
    
    if (restore_rr) {
        /* Put back the region register contents saved above. */
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
346
 
346
 
347
/** Insert data into DTLB.
347
/** Insert data into DTLB.
348
 *
348
 *
349
 * @param page Virtual page address including VRN bits.
349
 * @param page Virtual page address including VRN bits.
350
 * @param frame Physical frame address.
350
 * @param frame Physical frame address.
351
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
351
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
352
 * @param tr Translation register if dtr is true, ignored otherwise.
352
 * @param tr Translation register if dtr is true, ignored otherwise.
353
 */
353
 */
354
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
354
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
355
{
355
{
356
    tlb_entry_t entry;
356
    tlb_entry_t entry;
357
   
357
   
358
    entry.word[0] = 0;
358
    entry.word[0] = 0;
359
    entry.word[1] = 0;
359
    entry.word[1] = 0;
360
   
360
   
361
    entry.p = true;         /* present */
361
    entry.p = true;         /* present */
362
    entry.ma = MA_WRITEBACK;
362
    entry.ma = MA_WRITEBACK;
363
    entry.a = true;         /* already accessed */
363
    entry.a = true;         /* already accessed */
364
    entry.d = true;         /* already dirty */
364
    entry.d = true;         /* already dirty */
365
    entry.pl = PL_KERNEL;
365
    entry.pl = PL_KERNEL;
366
    entry.ar = AR_READ | AR_WRITE;
366
    entry.ar = AR_READ | AR_WRITE;
367
    entry.ppn = frame >> PPN_SHIFT;
367
    entry.ppn = frame >> PPN_SHIFT;
368
    entry.ps = PAGE_WIDTH;
368
    entry.ps = PAGE_WIDTH;
369
   
369
   
370
    if (dtr)
370
    if (dtr)
371
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
371
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
372
    else
372
    else
373
        dtc_mapping_insert(page, ASID_KERNEL, entry);
373
        dtc_mapping_insert(page, ASID_KERNEL, entry);
374
}
374
}
375
 
375
 
376
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, count_t width)
{
    /* ptr.d expects the purge size shifted into the ps field (hence <<2). */
    __asm__ volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
}
387
 
387
 
388
 
388
 
389
/** Copy content of PTE into data translation cache.
389
/** Copy content of PTE into data translation cache.
390
 *
390
 *
391
 * @param t PTE.
391
 * @param t PTE.
392
 */
392
 */
393
void dtc_pte_copy(pte_t *t)
393
void dtc_pte_copy(pte_t *t)
394
{
394
{
395
    tlb_entry_t entry;
395
    tlb_entry_t entry;
396
 
396
 
397
    entry.word[0] = 0;
397
    entry.word[0] = 0;
398
    entry.word[1] = 0;
398
    entry.word[1] = 0;
399
   
399
   
400
    entry.p = t->p;
400
    entry.p = t->p;
401
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
401
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
402
    entry.a = t->a;
402
    entry.a = t->a;
403
    entry.d = t->d;
403
    entry.d = t->d;
404
    entry.pl = t->k ? PL_KERNEL : PL_USER;
404
    entry.pl = t->k ? PL_KERNEL : PL_USER;
405
    entry.ar = t->w ? AR_WRITE : AR_READ;
405
    entry.ar = t->w ? AR_WRITE : AR_READ;
406
    entry.ppn = t->frame >> PPN_SHIFT;
406
    entry.ppn = t->frame >> PPN_SHIFT;
407
    entry.ps = PAGE_WIDTH;
407
    entry.ps = PAGE_WIDTH;
408
   
408
   
409
    dtc_mapping_insert(t->page, t->as->asid, entry);
409
    dtc_mapping_insert(t->page, t->as->asid, entry);
410
#ifdef CONFIG_VHPT
410
#ifdef CONFIG_VHPT
411
    vhpt_mapping_insert(t->page, t->as->asid, entry);
411
    vhpt_mapping_insert(t->page, t->as->asid, entry);
412
#endif  
412
#endif  
413
}
413
}
414
 
414
 
415
/** Copy content of PTE into instruction translation cache.
415
/** Copy content of PTE into instruction translation cache.
416
 *
416
 *
417
 * @param t PTE.
417
 * @param t PTE.
418
 */
418
 */
419
void itc_pte_copy(pte_t *t)
419
void itc_pte_copy(pte_t *t)
420
{
420
{
421
    tlb_entry_t entry;
421
    tlb_entry_t entry;
422
 
422
 
423
    entry.word[0] = 0;
423
    entry.word[0] = 0;
424
    entry.word[1] = 0;
424
    entry.word[1] = 0;
425
   
425
   
426
    ASSERT(t->x);
426
    ASSERT(t->x);
427
   
427
   
428
    entry.p = t->p;
428
    entry.p = t->p;
429
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
429
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
430
    entry.a = t->a;
430
    entry.a = t->a;
431
    entry.pl = t->k ? PL_KERNEL : PL_USER;
431
    entry.pl = t->k ? PL_KERNEL : PL_USER;
432
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
432
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
433
    entry.ppn = t->frame >> PPN_SHIFT;
433
    entry.ppn = t->frame >> PPN_SHIFT;
434
    entry.ps = PAGE_WIDTH;
434
    entry.ps = PAGE_WIDTH;
435
   
435
   
436
    itc_mapping_insert(t->page, t->as->asid, entry);
436
    itc_mapping_insert(t->page, t->as->asid, entry);
437
#ifdef CONFIG_VHPT
437
#ifdef CONFIG_VHPT
438
    vhpt_mapping_insert(t->page, t->as->asid, entry);
438
    vhpt_mapping_insert(t->page, t->as->asid, entry);
439
#endif  
439
#endif  
440
}
440
}
441
 
441
 
442
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;
    
    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in software page hash table.
         * Insert it into instruction translation cache.
         */
        itc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate,"Page fault at %p",va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
        }
    }
}
478
 
478
 
479
/** Data TLB fault handler for faults with VHPT turned off.
479
/** Data TLB fault handler for faults with VHPT turned off.
480
 *
480
 *
481
 * @param vector Interruption vector.
481
 * @param vector Interruption vector.
482
 * @param istate Structure with saved interruption state.
482
 * @param istate Structure with saved interruption state.
483
 */
483
 */
484
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
484
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
485
{
485
{
486
    region_register rr;
486
    region_register rr;
487
    rid_t rid;
487
    rid_t rid;
488
    __address va;
488
    uintptr_t va;
489
    pte_t *t;
489
    pte_t *t;
490
   
490
   
491
    va = istate->cr_ifa;    /* faulting address */
491
    va = istate->cr_ifa;    /* faulting address */
492
    rr.word = rr_read(VA2VRN(va));
492
    rr.word = rr_read(VA2VRN(va));
493
    rid = rr.map.rid;
493
    rid = rr.map.rid;
494
    if (RID2ASID(rid) == ASID_KERNEL) {
494
    if (RID2ASID(rid) == ASID_KERNEL) {
495
        if (VA2VRN(va) == VRN_KERNEL) {
495
        if (VA2VRN(va) == VRN_KERNEL) {
496
            /*
496
            /*
497
             * Provide KA2PA(identity) mapping for faulting piece of
497
             * Provide KA2PA(identity) mapping for faulting piece of
498
             * kernel address space.
498
             * kernel address space.
499
             */
499
             */
500
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
500
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
501
            return;
501
            return;
502
        }
502
        }
503
    }
503
    }
504
 
504
 
505
    page_table_lock(AS, true);
505
    page_table_lock(AS, true);
506
    t = page_mapping_find(AS, va);
506
    t = page_mapping_find(AS, va);
507
    if (t) {
507
    if (t) {
508
        /*
508
        /*
509
         * The mapping was found in software page hash table.
509
         * The mapping was found in software page hash table.
510
         * Insert it into data translation cache.
510
         * Insert it into data translation cache.
511
         */
511
         */
512
        dtc_pte_copy(t);
512
        dtc_pte_copy(t);
513
        page_table_unlock(AS, true);
513
        page_table_unlock(AS, true);
514
    } else {
514
    } else {
515
        /*
515
        /*
516
         * Forward the page fault to address space page fault handler.
516
         * Forward the page fault to address space page fault handler.
517
         */
517
         */
518
        page_table_unlock(AS, true);
518
        page_table_unlock(AS, true);
519
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
519
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
520
            fault_if_from_uspace(istate,"Page fault at %p",va);
520
            fault_if_from_uspace(istate,"Page fault at %p",va);
521
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
521
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
522
        }
522
        }
523
    }
523
    }
524
}
524
}
525
 
525
 
526
/** Data nested TLB fault handler.
526
/** Data nested TLB fault handler.
527
 *
527
 *
528
 * This fault should not occur.
528
 * This fault should not occur.
529
 *
529
 *
530
 * @param vector Interruption vector.
530
 * @param vector Interruption vector.
531
 * @param istate Structure with saved interruption state.
531
 * @param istate Structure with saved interruption state.
532
 */
532
 */
533
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
533
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
534
{
534
{
535
    panic("%s\n", __FUNCTION__);
535
    panic("%s\n", __FUNCTION__);
536
}
536
}
537
 
537
 
538
/** Data Dirty bit fault handler.
538
/** Data Dirty bit fault handler.
539
 *
539
 *
540
 * @param vector Interruption vector.
540
 * @param vector Interruption vector.
541
 * @param istate Structure with saved interruption state.
541
 * @param istate Structure with saved interruption state.
542
 */
542
 */
543
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
543
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
544
{
544
{
545
    region_register rr;
545
    region_register rr;
546
    rid_t rid;
546
    rid_t rid;
547
    __address va;
547
    uintptr_t va;
548
    pte_t *t;
548
    pte_t *t;
549
   
549
   
550
    va = istate->cr_ifa;    /* faulting address */
550
    va = istate->cr_ifa;    /* faulting address */
551
    rr.word = rr_read(VA2VRN(va));
551
    rr.word = rr_read(VA2VRN(va));
552
    rid = rr.map.rid;
552
    rid = rr.map.rid;
553
 
553
 
554
    page_table_lock(AS, true);
554
    page_table_lock(AS, true);
555
    t = page_mapping_find(AS, va);
555
    t = page_mapping_find(AS, va);
556
    ASSERT(t && t->p);
556
    ASSERT(t && t->p);
557
    if (t && t->p && t->w) {
557
    if (t && t->p && t->w) {
558
        /*
558
        /*
559
         * Update the Dirty bit in page tables and reinsert
559
         * Update the Dirty bit in page tables and reinsert
560
         * the mapping into DTC.
560
         * the mapping into DTC.
561
         */
561
         */
562
        t->d = true;
562
        t->d = true;
563
        dtc_pte_copy(t);
563
        dtc_pte_copy(t);
564
    } else {
564
    } else {
565
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
565
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
566
            fault_if_from_uspace(istate,"Page fault at %p",va);
566
            fault_if_from_uspace(istate,"Page fault at %p",va);
567
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
567
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
568
            t->d = true;
568
            t->d = true;
569
            dtc_pte_copy(t);
569
            dtc_pte_copy(t);
570
        }
570
        }
571
    }
571
    }
572
    page_table_unlock(AS, true);
572
    page_table_unlock(AS, true);
573
}
573
}
574
 
574
 
575
/** Instruction access bit fault handler.
575
/** Instruction access bit fault handler.
576
 *
576
 *
577
 * @param vector Interruption vector.
577
 * @param vector Interruption vector.
578
 * @param istate Structure with saved interruption state.
578
 * @param istate Structure with saved interruption state.
579
 */
579
 */
580
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
580
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
581
{
581
{
582
    region_register rr;
582
    region_register rr;
583
    rid_t rid;
583
    rid_t rid;
584
    __address va;
584
    uintptr_t va;
585
    pte_t *t;  
585
    pte_t *t;  
586
 
586
 
587
    va = istate->cr_ifa;    /* faulting address */
587
    va = istate->cr_ifa;    /* faulting address */
588
    rr.word = rr_read(VA2VRN(va));
588
    rr.word = rr_read(VA2VRN(va));
589
    rid = rr.map.rid;
589
    rid = rr.map.rid;
590
 
590
 
591
    page_table_lock(AS, true);
591
    page_table_lock(AS, true);
592
    t = page_mapping_find(AS, va);
592
    t = page_mapping_find(AS, va);
593
    ASSERT(t && t->p);
593
    ASSERT(t && t->p);
594
    if (t && t->p && t->x) {
594
    if (t && t->p && t->x) {
595
        /*
595
        /*
596
         * Update the Accessed bit in page tables and reinsert
596
         * Update the Accessed bit in page tables and reinsert
597
         * the mapping into ITC.
597
         * the mapping into ITC.
598
         */
598
         */
599
        t->a = true;
599
        t->a = true;
600
        itc_pte_copy(t);
600
        itc_pte_copy(t);
601
    } else {
601
    } else {
602
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
602
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
603
            fault_if_from_uspace(istate,"Page fault at %p",va);
603
            fault_if_from_uspace(istate,"Page fault at %p",va);
604
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
604
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
605
            t->a = true;
605
            t->a = true;
606
            itc_pte_copy(t);
606
            itc_pte_copy(t);
607
        }
607
        }
608
    }
608
    }
609
    page_table_unlock(AS, true);
609
    page_table_unlock(AS, true);
610
}
610
}
611
 
611
 
612
/** Data access bit fault handler.
612
/** Data access bit fault handler.
613
 *
613
 *
614
 * @param vector Interruption vector.
614
 * @param vector Interruption vector.
615
 * @param istate Structure with saved interruption state.
615
 * @param istate Structure with saved interruption state.
616
 */
616
 */
617
void data_access_bit_fault(__u64 vector, istate_t *istate)
617
void data_access_bit_fault(uint64_t vector, istate_t *istate)
618
{
618
{
619
    region_register rr;
619
    region_register rr;
620
    rid_t rid;
620
    rid_t rid;
621
    __address va;
621
    uintptr_t va;
622
    pte_t *t;
622
    pte_t *t;
623
 
623
 
624
    va = istate->cr_ifa;    /* faulting address */
624
    va = istate->cr_ifa;    /* faulting address */
625
    rr.word = rr_read(VA2VRN(va));
625
    rr.word = rr_read(VA2VRN(va));
626
    rid = rr.map.rid;
626
    rid = rr.map.rid;
627
 
627
 
628
    page_table_lock(AS, true);
628
    page_table_lock(AS, true);
629
    t = page_mapping_find(AS, va);
629
    t = page_mapping_find(AS, va);
630
    ASSERT(t && t->p);
630
    ASSERT(t && t->p);
631
    if (t && t->p) {
631
    if (t && t->p) {
632
        /*
632
        /*
633
         * Update the Accessed bit in page tables and reinsert
633
         * Update the Accessed bit in page tables and reinsert
634
         * the mapping into DTC.
634
         * the mapping into DTC.
635
         */
635
         */
636
        t->a = true;
636
        t->a = true;
637
        dtc_pte_copy(t);
637
        dtc_pte_copy(t);
638
    } else {
638
    } else {
639
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
639
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
640
            fault_if_from_uspace(istate,"Page fault at %p",va);
640
            fault_if_from_uspace(istate,"Page fault at %p",va);
641
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
641
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
642
            t->a = true;
642
            t->a = true;
643
            itc_pte_copy(t);
643
            itc_pte_copy(t);
644
        }
644
        }
645
    }
645
    }
646
    page_table_unlock(AS, true);
646
    page_table_unlock(AS, true);
647
}
647
}
648
 
648
 
649
/** Page not present fault handler.
649
/** Page not present fault handler.
650
 *
650
 *
651
 * @param vector Interruption vector.
651
 * @param vector Interruption vector.
652
 * @param istate Structure with saved interruption state.
652
 * @param istate Structure with saved interruption state.
653
 */
653
 */
654
void page_not_present(__u64 vector, istate_t *istate)
654
void page_not_present(uint64_t vector, istate_t *istate)
655
{
655
{
656
    region_register rr;
656
    region_register rr;
657
    rid_t rid;
657
    rid_t rid;
658
    __address va;
658
    uintptr_t va;
659
    pte_t *t;
659
    pte_t *t;
660
   
660
   
661
    va = istate->cr_ifa;    /* faulting address */
661
    va = istate->cr_ifa;    /* faulting address */
662
    rr.word = rr_read(VA2VRN(va));
662
    rr.word = rr_read(VA2VRN(va));
663
    rid = rr.map.rid;
663
    rid = rr.map.rid;
664
 
664
 
665
    page_table_lock(AS, true);
665
    page_table_lock(AS, true);
666
    t = page_mapping_find(AS, va);
666
    t = page_mapping_find(AS, va);
667
    ASSERT(t);
667
    ASSERT(t);
668
   
668
   
669
    if (t->p) {
669
    if (t->p) {
670
        /*
670
        /*
671
         * If the Present bit is set in page hash table, just copy it
671
         * If the Present bit is set in page hash table, just copy it
672
         * and update ITC/DTC.
672
         * and update ITC/DTC.
673
         */
673
         */
674
        if (t->x)
674
        if (t->x)
675
            itc_pte_copy(t);
675
            itc_pte_copy(t);
676
        else
676
        else
677
            dtc_pte_copy(t);
677
            dtc_pte_copy(t);
678
        page_table_unlock(AS, true);
678
        page_table_unlock(AS, true);
679
    } else {
679
    } else {
680
        page_table_unlock(AS, true);
680
        page_table_unlock(AS, true);
681
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
681
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
682
            fault_if_from_uspace(istate,"Page fault at %p",va);
682
            fault_if_from_uspace(istate,"Page fault at %p",va);
683
            panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
683
            panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
684
        }
684
        }
685
    }
685
    }
686
}
686
}
687
 
687
 
688
 /** @}
688
 /** @}
689
 */
689
 */
690
 
690
 
691
 
691