Subversion Repositories HelenOS-historic

Rev

Rev 1675 | Rev 1708 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 1675 Rev 1702
1
/*
1
/*
2
 * Copyright (C) 2006 Jakub Jermar
2
 * Copyright (C) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
-
 
29
 /** @addtogroup ia64mm
-
 
30
 * @{
-
 
31
 */
-
 
32
/** @file
-
 
33
 */
-
 
34
 
29
/*
35
/*
30
 * TLB management.
36
 * TLB management.
31
 */
37
 */
32
 
38
 
33
#include <mm/tlb.h>
39
#include <mm/tlb.h>
34
#include <mm/asid.h>
40
#include <mm/asid.h>
35
#include <mm/page.h>
41
#include <mm/page.h>
36
#include <mm/as.h>
42
#include <mm/as.h>
37
#include <arch/mm/tlb.h>
43
#include <arch/mm/tlb.h>
38
#include <arch/mm/page.h>
44
#include <arch/mm/page.h>
39
#include <arch/mm/vhpt.h>
45
#include <arch/mm/vhpt.h>
40
#include <arch/barrier.h>
46
#include <arch/barrier.h>
41
#include <arch/interrupt.h>
47
#include <arch/interrupt.h>
42
#include <arch/pal/pal.h>
48
#include <arch/pal/pal.h>
43
#include <arch/asm.h>
49
#include <arch/asm.h>
44
#include <typedefs.h>
50
#include <typedefs.h>
45
#include <panic.h>
51
#include <panic.h>
46
#include <print.h>
52
#include <print.h>
47
#include <arch.h>
53
#include <arch.h>
48
#include <interrupt.h>
54
#include <interrupt.h>
49
 
55
 
50
/** Invalidate all TLB entries. */
56
/** Invalidate all TLB entries. */
51
void tlb_invalidate_all(void)
57
void tlb_invalidate_all(void)
52
{
58
{
53
        ipl_t ipl;
59
        ipl_t ipl;
54
        __address adr;
60
        __address adr;
55
        __u32 count1, count2, stride1, stride2;
61
        __u32 count1, count2, stride1, stride2;
56
       
62
       
57
        int i,j;
63
        int i,j;
58
       
64
       
59
        adr = PAL_PTCE_INFO_BASE();
65
        adr = PAL_PTCE_INFO_BASE();
60
        count1 = PAL_PTCE_INFO_COUNT1();
66
        count1 = PAL_PTCE_INFO_COUNT1();
61
        count2 = PAL_PTCE_INFO_COUNT2();
67
        count2 = PAL_PTCE_INFO_COUNT2();
62
        stride1 = PAL_PTCE_INFO_STRIDE1();
68
        stride1 = PAL_PTCE_INFO_STRIDE1();
63
        stride2 = PAL_PTCE_INFO_STRIDE2();
69
        stride2 = PAL_PTCE_INFO_STRIDE2();
64
       
70
       
65
        ipl = interrupts_disable();
71
        ipl = interrupts_disable();
66
 
72
 
67
        for(i = 0; i < count1; i++) {
73
        for(i = 0; i < count1; i++) {
68
            for(j = 0; j < count2; j++) {
74
            for(j = 0; j < count2; j++) {
69
                __asm__ volatile (
75
                __asm__ volatile (
70
                    "ptc.e %0 ;;"
76
                    "ptc.e %0 ;;"
71
                    :
77
                    :
72
                    : "r" (adr)
78
                    : "r" (adr)
73
                );
79
                );
74
                adr += stride2;
80
                adr += stride2;
75
            }
81
            }
76
            adr += stride1;
82
            adr += stride1;
77
        }
83
        }
78
 
84
 
79
        interrupts_restore(ipl);
85
        interrupts_restore(ipl);
80
 
86
 
81
        srlz_d();
87
        srlz_d();
82
        srlz_i();
88
        srlz_i();
83
#ifdef CONFIG_VHPT
89
#ifdef CONFIG_VHPT
84
        vhpt_invalidate_all();
90
        vhpt_invalidate_all();
85
#endif  
91
#endif  
86
}
92
}
87
 
93
 
88
/** Invalidate entries belonging to an address space.
94
/** Invalidate entries belonging to an address space.
89
 *
95
 *
90
 * @param asid Address space identifier.
96
 * @param asid Address space identifier.
91
 */
97
 */
92
void tlb_invalidate_asid(asid_t asid)
98
void tlb_invalidate_asid(asid_t asid)
93
{
99
{
94
    tlb_invalidate_all();
100
    tlb_invalidate_all();
95
}
101
}
96
 
102
 
97
 
103
 
98
/** Invalidate TLB entries for an array of pages.
 *
 * The purge is performed with ptc.l using a power-of-four purge size
 * derived from cnt; va is aligned down to that size first.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entry is to be invalidated.
 * @param cnt Number of consecutive pages.
 */
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
    region_register rr;
    bool restore_rr = false;
    int b = 0;
    count_t c = cnt;    /* was 'int c': could truncate a large count_t */
    __address va = page;
    __u64 ps;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    /* b = floor(log2(cnt)) / 2 selects the purge granularity below. */
    while (c >>= 1)
        b++;
    b >>= 1;

    switch (b) {
    case 0: /* cnt 1 - 3 */
        ps = PAGE_WIDTH;
        break;
    case 1: /* cnt 4 - 15 */
        ps = PAGE_WIDTH + 2;
        break;
    case 2: /* cnt 16 - 63 */
        ps = PAGE_WIDTH + 4;
        break;
    case 3: /* cnt 64 - 255 */
        ps = PAGE_WIDTH + 6;
        break;
    case 4: /* cnt 256 - 1023 */
        ps = PAGE_WIDTH + 8;
        break;
    case 5: /* cnt 1024 - 4095 */
        ps = PAGE_WIDTH + 10;
        break;
    case 6: /* cnt 4096 - 16383 */
        ps = PAGE_WIDTH + 12;
        break;
    case 7: /* cnt 16384 - 65535 */
    case 8: /* cnt 65536 - (256K - 1) */
        ps = PAGE_WIDTH + 14;
        break;
    default:
        ps = PAGE_WIDTH + 18;
        break;
    }

    /*
     * Align va down to the purge size (case 0 purges single pages and
     * needs no alignment). Use an __address-wide shift: the original
     * '1 << ps' was an int shift and is undefined for ps >= 31, which
     * the default case reaches (PAGE_WIDTH + 18).
     */
    if (ps != PAGE_WIDTH)
        va &= ~(((__address) 1 << ps) - 1);

    for (; va < (page + cnt * PAGE_SIZE); va += ((__address) 1 << ps)) {
        __asm__ volatile (
            "ptc.l %0,%1;;"
            :
            : "r" (va), "r" (ps << 2)
        );
    }
    srlz_d();
    srlz_i();

    if (restore_rr) {
        /* Restore the saved region register contents. */
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
191
 
197
 
192
/** Insert data into data translation cache.
198
/** Insert data into data translation cache.
193
 *
199
 *
194
 * @param va Virtual page address.
200
 * @param va Virtual page address.
195
 * @param asid Address space identifier.
201
 * @param asid Address space identifier.
196
 * @param entry The rest of TLB entry as required by TLB insertion format.
202
 * @param entry The rest of TLB entry as required by TLB insertion format.
197
 */
203
 */
198
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
204
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
199
{
205
{
200
    tc_mapping_insert(va, asid, entry, true);
206
    tc_mapping_insert(va, asid, entry, true);
201
}
207
}
202
 
208
 
203
/** Insert data into instruction translation cache.
209
/** Insert data into instruction translation cache.
204
 *
210
 *
205
 * @param va Virtual page address.
211
 * @param va Virtual page address.
206
 * @param asid Address space identifier.
212
 * @param asid Address space identifier.
207
 * @param entry The rest of TLB entry as required by TLB insertion format.
213
 * @param entry The rest of TLB entry as required by TLB insertion format.
208
 */
214
 */
209
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
215
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
210
{
216
{
211
    tc_mapping_insert(va, asid, entry, false);
217
    tc_mapping_insert(va, asid, entry, false);
212
}
218
}
213
 
219
 
214
/** Insert data into instruction or data translation cache.
220
/** Insert data into instruction or data translation cache.
215
 *
221
 *
216
 * @param va Virtual page address.
222
 * @param va Virtual page address.
217
 * @param asid Address space identifier.
223
 * @param asid Address space identifier.
218
 * @param entry The rest of TLB entry as required by TLB insertion format.
224
 * @param entry The rest of TLB entry as required by TLB insertion format.
219
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
225
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
220
 */
226
 */
221
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
227
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
222
{
228
{
223
    region_register rr;
229
    region_register rr;
224
    bool restore_rr = false;
230
    bool restore_rr = false;
225
 
231
 
226
    rr.word = rr_read(VA2VRN(va));
232
    rr.word = rr_read(VA2VRN(va));
227
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
233
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
228
        /*
234
        /*
229
         * The selected region register does not contain required RID.
235
         * The selected region register does not contain required RID.
230
         * Save the old content of the register and replace the RID.
236
         * Save the old content of the register and replace the RID.
231
         */
237
         */
232
        region_register rr0;
238
        region_register rr0;
233
 
239
 
234
        rr0 = rr;
240
        rr0 = rr;
235
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
241
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
236
        rr_write(VA2VRN(va), rr0.word);
242
        rr_write(VA2VRN(va), rr0.word);
237
        srlz_d();
243
        srlz_d();
238
        srlz_i();
244
        srlz_i();
239
    }
245
    }
240
   
246
   
241
    __asm__ volatile (
247
    __asm__ volatile (
242
        "mov r8=psr;;\n"
248
        "mov r8=psr;;\n"
243
        "rsm %0;;\n"            /* PSR_IC_MASK */
249
        "rsm %0;;\n"            /* PSR_IC_MASK */
244
        "srlz.d;;\n"
250
        "srlz.d;;\n"
245
        "srlz.i;;\n"
251
        "srlz.i;;\n"
246
        "mov cr.ifa=%1\n"       /* va */
252
        "mov cr.ifa=%1\n"       /* va */
247
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
253
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
248
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
254
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
249
        "(p6) itc.i %3;;\n"
255
        "(p6) itc.i %3;;\n"
250
        "(p7) itc.d %3;;\n"
256
        "(p7) itc.d %3;;\n"
251
        "mov psr.l=r8;;\n"
257
        "mov psr.l=r8;;\n"
252
        "srlz.d;;\n"
258
        "srlz.d;;\n"
253
        :
259
        :
254
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
260
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
255
        : "p6", "p7", "r8"
261
        : "p6", "p7", "r8"
256
    );
262
    );
257
   
263
   
258
    if (restore_rr) {
264
    if (restore_rr) {
259
        rr_write(VA2VRN(va), rr.word);
265
        rr_write(VA2VRN(va), rr.word);
260
        srlz_d();
266
        srlz_d();
261
        srlz_i();
267
        srlz_i();
262
    }
268
    }
263
}
269
}
264
 
270
 
265
/** Insert data into instruction translation register.
271
/** Insert data into instruction translation register.
266
 *
272
 *
267
 * @param va Virtual page address.
273
 * @param va Virtual page address.
268
 * @param asid Address space identifier.
274
 * @param asid Address space identifier.
269
 * @param entry The rest of TLB entry as required by TLB insertion format.
275
 * @param entry The rest of TLB entry as required by TLB insertion format.
270
 * @param tr Translation register.
276
 * @param tr Translation register.
271
 */
277
 */
272
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
278
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
273
{
279
{
274
    tr_mapping_insert(va, asid, entry, false, tr);
280
    tr_mapping_insert(va, asid, entry, false, tr);
275
}
281
}
276
 
282
 
277
/** Insert data into data translation register.
283
/** Insert data into data translation register.
278
 *
284
 *
279
 * @param va Virtual page address.
285
 * @param va Virtual page address.
280
 * @param asid Address space identifier.
286
 * @param asid Address space identifier.
281
 * @param entry The rest of TLB entry as required by TLB insertion format.
287
 * @param entry The rest of TLB entry as required by TLB insertion format.
282
 * @param tr Translation register.
288
 * @param tr Translation register.
283
 */
289
 */
284
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
290
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
285
{
291
{
286
    tr_mapping_insert(va, asid, entry, true, tr);
292
    tr_mapping_insert(va, asid, entry, true, tr);
287
}
293
}
288
 
294
 
289
/** Insert data into instruction or data translation register.
295
/** Insert data into instruction or data translation register.
290
 *
296
 *
291
 * @param va Virtual page address.
297
 * @param va Virtual page address.
292
 * @param asid Address space identifier.
298
 * @param asid Address space identifier.
293
 * @param entry The rest of TLB entry as required by TLB insertion format.
299
 * @param entry The rest of TLB entry as required by TLB insertion format.
294
 * @param dtc If true, insert into data translation register, use instruction translation register otherwise.
300
 * @param dtc If true, insert into data translation register, use instruction translation register otherwise.
295
 * @param tr Translation register.
301
 * @param tr Translation register.
296
 */
302
 */
297
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
303
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
298
{
304
{
299
    region_register rr;
305
    region_register rr;
300
    bool restore_rr = false;
306
    bool restore_rr = false;
301
 
307
 
302
    rr.word = rr_read(VA2VRN(va));
308
    rr.word = rr_read(VA2VRN(va));
303
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
309
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
304
        /*
310
        /*
305
         * The selected region register does not contain required RID.
311
         * The selected region register does not contain required RID.
306
         * Save the old content of the register and replace the RID.
312
         * Save the old content of the register and replace the RID.
307
         */
313
         */
308
        region_register rr0;
314
        region_register rr0;
309
 
315
 
310
        rr0 = rr;
316
        rr0 = rr;
311
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
317
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
312
        rr_write(VA2VRN(va), rr0.word);
318
        rr_write(VA2VRN(va), rr0.word);
313
        srlz_d();
319
        srlz_d();
314
        srlz_i();
320
        srlz_i();
315
    }
321
    }
316
 
322
 
317
    __asm__ volatile (
323
    __asm__ volatile (
318
        "mov r8=psr;;\n"
324
        "mov r8=psr;;\n"
319
        "rsm %0;;\n"            /* PSR_IC_MASK */
325
        "rsm %0;;\n"            /* PSR_IC_MASK */
320
        "srlz.d;;\n"
326
        "srlz.d;;\n"
321
        "srlz.i;;\n"
327
        "srlz.i;;\n"
322
        "mov cr.ifa=%1\n"           /* va */         
328
        "mov cr.ifa=%1\n"           /* va */         
323
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
329
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
324
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
330
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
325
        "(p6) itr.i itr[%4]=%3;;\n"
331
        "(p6) itr.i itr[%4]=%3;;\n"
326
        "(p7) itr.d dtr[%4]=%3;;\n"
332
        "(p7) itr.d dtr[%4]=%3;;\n"
327
        "mov psr.l=r8;;\n"
333
        "mov psr.l=r8;;\n"
328
        "srlz.d;;\n"
334
        "srlz.d;;\n"
329
        :
335
        :
330
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
336
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
331
        : "p6", "p7", "r8"
337
        : "p6", "p7", "r8"
332
    );
338
    );
333
   
339
   
334
    if (restore_rr) {
340
    if (restore_rr) {
335
        rr_write(VA2VRN(va), rr.word);
341
        rr_write(VA2VRN(va), rr.word);
336
        srlz_d();
342
        srlz_d();
337
        srlz_i();
343
        srlz_i();
338
    }
344
    }
339
}
345
}
340
 
346
 
341
/** Insert data into DTLB.
347
/** Insert data into DTLB.
342
 *
348
 *
343
 * @param page Virtual page address including VRN bits.
349
 * @param page Virtual page address including VRN bits.
344
 * @param frame Physical frame address.
350
 * @param frame Physical frame address.
345
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
351
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
346
 * @param tr Translation register if dtr is true, ignored otherwise.
352
 * @param tr Translation register if dtr is true, ignored otherwise.
347
 */
353
 */
348
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
354
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
349
{
355
{
350
    tlb_entry_t entry;
356
    tlb_entry_t entry;
351
   
357
   
352
    entry.word[0] = 0;
358
    entry.word[0] = 0;
353
    entry.word[1] = 0;
359
    entry.word[1] = 0;
354
   
360
   
355
    entry.p = true;         /* present */
361
    entry.p = true;         /* present */
356
    entry.ma = MA_WRITEBACK;
362
    entry.ma = MA_WRITEBACK;
357
    entry.a = true;         /* already accessed */
363
    entry.a = true;         /* already accessed */
358
    entry.d = true;         /* already dirty */
364
    entry.d = true;         /* already dirty */
359
    entry.pl = PL_KERNEL;
365
    entry.pl = PL_KERNEL;
360
    entry.ar = AR_READ | AR_WRITE;
366
    entry.ar = AR_READ | AR_WRITE;
361
    entry.ppn = frame >> PPN_SHIFT;
367
    entry.ppn = frame >> PPN_SHIFT;
362
    entry.ps = PAGE_WIDTH;
368
    entry.ps = PAGE_WIDTH;
363
   
369
   
364
    if (dtr)
370
    if (dtr)
365
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
371
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
366
    else
372
    else
367
        dtc_mapping_insert(page, ASID_KERNEL, entry);
373
        dtc_mapping_insert(page, ASID_KERNEL, entry);
368
}
374
}
369
 
375
 
370
/** Purge kernel entries from DTR.
376
/** Purge kernel entries from DTR.
371
 *
377
 *
372
 * Purge DTR entries used by the kernel.
378
 * Purge DTR entries used by the kernel.
373
 *
379
 *
374
 * @param page Virtual page address including VRN bits.
380
 * @param page Virtual page address including VRN bits.
375
 * @param width Width of the purge in bits.
381
 * @param width Width of the purge in bits.
376
 */
382
 */
377
void dtr_purge(__address page, count_t width)
383
void dtr_purge(__address page, count_t width)
378
{
384
{
379
    __asm__ volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
385
    __asm__ volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
380
}
386
}
381
 
387
 
382
 
388
 
383
/** Copy content of PTE into data translation cache.
389
/** Copy content of PTE into data translation cache.
384
 *
390
 *
385
 * @param t PTE.
391
 * @param t PTE.
386
 */
392
 */
387
void dtc_pte_copy(pte_t *t)
393
void dtc_pte_copy(pte_t *t)
388
{
394
{
389
    tlb_entry_t entry;
395
    tlb_entry_t entry;
390
 
396
 
391
    entry.word[0] = 0;
397
    entry.word[0] = 0;
392
    entry.word[1] = 0;
398
    entry.word[1] = 0;
393
   
399
   
394
    entry.p = t->p;
400
    entry.p = t->p;
395
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
401
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
396
    entry.a = t->a;
402
    entry.a = t->a;
397
    entry.d = t->d;
403
    entry.d = t->d;
398
    entry.pl = t->k ? PL_KERNEL : PL_USER;
404
    entry.pl = t->k ? PL_KERNEL : PL_USER;
399
    entry.ar = t->w ? AR_WRITE : AR_READ;
405
    entry.ar = t->w ? AR_WRITE : AR_READ;
400
    entry.ppn = t->frame >> PPN_SHIFT;
406
    entry.ppn = t->frame >> PPN_SHIFT;
401
    entry.ps = PAGE_WIDTH;
407
    entry.ps = PAGE_WIDTH;
402
   
408
   
403
    dtc_mapping_insert(t->page, t->as->asid, entry);
409
    dtc_mapping_insert(t->page, t->as->asid, entry);
404
#ifdef CONFIG_VHPT
410
#ifdef CONFIG_VHPT
405
    vhpt_mapping_insert(t->page, t->as->asid, entry);
411
    vhpt_mapping_insert(t->page, t->as->asid, entry);
406
#endif  
412
#endif  
407
}
413
}
408
 
414
 
409
/** Copy content of PTE into instruction translation cache.
415
/** Copy content of PTE into instruction translation cache.
410
 *
416
 *
411
 * @param t PTE.
417
 * @param t PTE.
412
 */
418
 */
413
void itc_pte_copy(pte_t *t)
419
void itc_pte_copy(pte_t *t)
414
{
420
{
415
    tlb_entry_t entry;
421
    tlb_entry_t entry;
416
 
422
 
417
    entry.word[0] = 0;
423
    entry.word[0] = 0;
418
    entry.word[1] = 0;
424
    entry.word[1] = 0;
419
   
425
   
420
    ASSERT(t->x);
426
    ASSERT(t->x);
421
   
427
   
422
    entry.p = t->p;
428
    entry.p = t->p;
423
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
429
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
424
    entry.a = t->a;
430
    entry.a = t->a;
425
    entry.pl = t->k ? PL_KERNEL : PL_USER;
431
    entry.pl = t->k ? PL_KERNEL : PL_USER;
426
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
432
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
427
    entry.ppn = t->frame >> PPN_SHIFT;
433
    entry.ppn = t->frame >> PPN_SHIFT;
428
    entry.ps = PAGE_WIDTH;
434
    entry.ps = PAGE_WIDTH;
429
   
435
   
430
    itc_mapping_insert(t->page, t->as->asid, entry);
436
    itc_mapping_insert(t->page, t->as->asid, entry);
431
#ifdef CONFIG_VHPT
437
#ifdef CONFIG_VHPT
432
    vhpt_mapping_insert(t->page, t->as->asid, entry);
438
    vhpt_mapping_insert(t->page, t->as->asid, entry);
433
#endif  
439
#endif  
434
}
440
}
435
 
441
 
436
/** Instruction TLB fault handler for faults with VHPT turned off.
442
/** Instruction TLB fault handler for faults with VHPT turned off.
437
 *
443
 *
438
 * @param vector Interruption vector.
444
 * @param vector Interruption vector.
439
 * @param istate Structure with saved interruption state.
445
 * @param istate Structure with saved interruption state.
440
 */
446
 */
441
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
447
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
442
{
448
{
443
    region_register rr;
449
    region_register rr;
444
    rid_t rid;
450
    rid_t rid;
445
    __address va;
451
    __address va;
446
    pte_t *t;
452
    pte_t *t;
447
   
453
   
448
    va = istate->cr_ifa;    /* faulting address */
454
    va = istate->cr_ifa;    /* faulting address */
449
    rr.word = rr_read(VA2VRN(va));
455
    rr.word = rr_read(VA2VRN(va));
450
    rid = rr.map.rid;
456
    rid = rr.map.rid;
451
 
457
 
452
    page_table_lock(AS, true);
458
    page_table_lock(AS, true);
453
    t = page_mapping_find(AS, va);
459
    t = page_mapping_find(AS, va);
454
    if (t) {
460
    if (t) {
455
        /*
461
        /*
456
         * The mapping was found in software page hash table.
462
         * The mapping was found in software page hash table.
457
         * Insert it into data translation cache.
463
         * Insert it into data translation cache.
458
         */
464
         */
459
        itc_pte_copy(t);
465
        itc_pte_copy(t);
460
        page_table_unlock(AS, true);
466
        page_table_unlock(AS, true);
461
    } else {
467
    } else {
462
        /*
468
        /*
463
         * Forward the page fault to address space page fault handler.
469
         * Forward the page fault to address space page fault handler.
464
         */
470
         */
465
        page_table_unlock(AS, true);
471
        page_table_unlock(AS, true);
466
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
472
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
467
            fault_if_from_uspace(istate,"Page fault at %P",va);
473
            fault_if_from_uspace(istate,"Page fault at %P",va);
468
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
474
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
469
        }
475
        }
470
    }
476
    }
471
}
477
}
472
 
478
 
473
/** Data TLB fault handler for faults with VHPT turned off.
479
/** Data TLB fault handler for faults with VHPT turned off.
474
 *
480
 *
475
 * @param vector Interruption vector.
481
 * @param vector Interruption vector.
476
 * @param istate Structure with saved interruption state.
482
 * @param istate Structure with saved interruption state.
477
 */
483
 */
478
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
484
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
479
{
485
{
480
    region_register rr;
486
    region_register rr;
481
    rid_t rid;
487
    rid_t rid;
482
    __address va;
488
    __address va;
483
    pte_t *t;
489
    pte_t *t;
484
   
490
   
485
    va = istate->cr_ifa;    /* faulting address */
491
    va = istate->cr_ifa;    /* faulting address */
486
    rr.word = rr_read(VA2VRN(va));
492
    rr.word = rr_read(VA2VRN(va));
487
    rid = rr.map.rid;
493
    rid = rr.map.rid;
488
    if (RID2ASID(rid) == ASID_KERNEL) {
494
    if (RID2ASID(rid) == ASID_KERNEL) {
489
        if (VA2VRN(va) == VRN_KERNEL) {
495
        if (VA2VRN(va) == VRN_KERNEL) {
490
            /*
496
            /*
491
             * Provide KA2PA(identity) mapping for faulting piece of
497
             * Provide KA2PA(identity) mapping for faulting piece of
492
             * kernel address space.
498
             * kernel address space.
493
             */
499
             */
494
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
500
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
495
            return;
501
            return;
496
        }
502
        }
497
    }
503
    }
498
 
504
 
499
    page_table_lock(AS, true);
505
    page_table_lock(AS, true);
500
    t = page_mapping_find(AS, va);
506
    t = page_mapping_find(AS, va);
501
    if (t) {
507
    if (t) {
502
        /*
508
        /*
503
         * The mapping was found in software page hash table.
509
         * The mapping was found in software page hash table.
504
         * Insert it into data translation cache.
510
         * Insert it into data translation cache.
505
         */
511
         */
506
        dtc_pte_copy(t);
512
        dtc_pte_copy(t);
507
        page_table_unlock(AS, true);
513
        page_table_unlock(AS, true);
508
    } else {
514
    } else {
509
        /*
515
        /*
510
         * Forward the page fault to address space page fault handler.
516
         * Forward the page fault to address space page fault handler.
511
         */
517
         */
512
        page_table_unlock(AS, true);
518
        page_table_unlock(AS, true);
513
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
519
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
514
            fault_if_from_uspace(istate,"Page fault at %P",va);
520
            fault_if_from_uspace(istate,"Page fault at %P",va);
515
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
521
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
516
        }
522
        }
517
    }
523
    }
518
}
524
}
519
 
525
 
520
/** Data nested TLB fault handler.
526
/** Data nested TLB fault handler.
521
 *
527
 *
522
 * This fault should not occur.
528
 * This fault should not occur.
523
 *
529
 *
524
 * @param vector Interruption vector.
530
 * @param vector Interruption vector.
525
 * @param istate Structure with saved interruption state.
531
 * @param istate Structure with saved interruption state.
526
 */
532
 */
527
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
533
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
528
{
534
{
529
    panic("%s\n", __FUNCTION__);
535
    panic("%s\n", __FUNCTION__);
530
}
536
}
531
 
537
 
532
/** Data Dirty bit fault handler.
538
/** Data Dirty bit fault handler.
533
 *
539
 *
534
 * @param vector Interruption vector.
540
 * @param vector Interruption vector.
535
 * @param istate Structure with saved interruption state.
541
 * @param istate Structure with saved interruption state.
536
 */
542
 */
537
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
543
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
538
{
544
{
539
    region_register rr;
545
    region_register rr;
540
    rid_t rid;
546
    rid_t rid;
541
    __address va;
547
    __address va;
542
    pte_t *t;
548
    pte_t *t;
543
   
549
   
544
    va = istate->cr_ifa;    /* faulting address */
550
    va = istate->cr_ifa;    /* faulting address */
545
    rr.word = rr_read(VA2VRN(va));
551
    rr.word = rr_read(VA2VRN(va));
546
    rid = rr.map.rid;
552
    rid = rr.map.rid;
547
 
553
 
548
    page_table_lock(AS, true);
554
    page_table_lock(AS, true);
549
    t = page_mapping_find(AS, va);
555
    t = page_mapping_find(AS, va);
550
    ASSERT(t && t->p);
556
    ASSERT(t && t->p);
551
    if (t && t->p && t->w) {
557
    if (t && t->p && t->w) {
552
        /*
558
        /*
553
         * Update the Dirty bit in page tables and reinsert
559
         * Update the Dirty bit in page tables and reinsert
554
         * the mapping into DTC.
560
         * the mapping into DTC.
555
         */
561
         */
556
        t->d = true;
562
        t->d = true;
557
        dtc_pte_copy(t);
563
        dtc_pte_copy(t);
558
    } else {
564
    } else {
559
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
565
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
560
            fault_if_from_uspace(istate,"Page fault at %P",va);
566
            fault_if_from_uspace(istate,"Page fault at %P",va);
561
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
567
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
562
            t->d = true;
568
            t->d = true;
563
            dtc_pte_copy(t);
569
            dtc_pte_copy(t);
564
        }
570
        }
565
    }
571
    }
566
    page_table_unlock(AS, true);
572
    page_table_unlock(AS, true);
567
}
573
}
568
 
574
 
569
/** Instruction access bit fault handler.
575
/** Instruction access bit fault handler.
570
 *
576
 *
571
 * @param vector Interruption vector.
577
 * @param vector Interruption vector.
572
 * @param istate Structure with saved interruption state.
578
 * @param istate Structure with saved interruption state.
573
 */
579
 */
574
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
580
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
575
{
581
{
576
    region_register rr;
582
    region_register rr;
577
    rid_t rid;
583
    rid_t rid;
578
    __address va;
584
    __address va;
579
    pte_t *t;  
585
    pte_t *t;  
580
 
586
 
581
    va = istate->cr_ifa;    /* faulting address */
587
    va = istate->cr_ifa;    /* faulting address */
582
    rr.word = rr_read(VA2VRN(va));
588
    rr.word = rr_read(VA2VRN(va));
583
    rid = rr.map.rid;
589
    rid = rr.map.rid;
584
 
590
 
585
    page_table_lock(AS, true);
591
    page_table_lock(AS, true);
586
    t = page_mapping_find(AS, va);
592
    t = page_mapping_find(AS, va);
587
    ASSERT(t && t->p);
593
    ASSERT(t && t->p);
588
    if (t && t->p && t->x) {
594
    if (t && t->p && t->x) {
589
        /*
595
        /*
590
         * Update the Accessed bit in page tables and reinsert
596
         * Update the Accessed bit in page tables and reinsert
591
         * the mapping into ITC.
597
         * the mapping into ITC.
592
         */
598
         */
593
        t->a = true;
599
        t->a = true;
594
        itc_pte_copy(t);
600
        itc_pte_copy(t);
595
    } else {
601
    } else {
596
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
602
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
597
            fault_if_from_uspace(istate,"Page fault at %P",va);
603
            fault_if_from_uspace(istate,"Page fault at %P",va);
598
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
604
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
599
            t->a = true;
605
            t->a = true;
600
            itc_pte_copy(t);
606
            itc_pte_copy(t);
601
        }
607
        }
602
    }
608
    }
603
    page_table_unlock(AS, true);
609
    page_table_unlock(AS, true);
604
}
610
}
605
 
611
 
606
/** Data access bit fault handler.
612
/** Data access bit fault handler.
607
 *
613
 *
608
 * @param vector Interruption vector.
614
 * @param vector Interruption vector.
609
 * @param istate Structure with saved interruption state.
615
 * @param istate Structure with saved interruption state.
610
 */
616
 */
611
void data_access_bit_fault(__u64 vector, istate_t *istate)
617
void data_access_bit_fault(__u64 vector, istate_t *istate)
612
{
618
{
613
    region_register rr;
619
    region_register rr;
614
    rid_t rid;
620
    rid_t rid;
615
    __address va;
621
    __address va;
616
    pte_t *t;
622
    pte_t *t;
617
 
623
 
618
    va = istate->cr_ifa;    /* faulting address */
624
    va = istate->cr_ifa;    /* faulting address */
619
    rr.word = rr_read(VA2VRN(va));
625
    rr.word = rr_read(VA2VRN(va));
620
    rid = rr.map.rid;
626
    rid = rr.map.rid;
621
 
627
 
622
    page_table_lock(AS, true);
628
    page_table_lock(AS, true);
623
    t = page_mapping_find(AS, va);
629
    t = page_mapping_find(AS, va);
624
    ASSERT(t && t->p);
630
    ASSERT(t && t->p);
625
    if (t && t->p) {
631
    if (t && t->p) {
626
        /*
632
        /*
627
         * Update the Accessed bit in page tables and reinsert
633
         * Update the Accessed bit in page tables and reinsert
628
         * the mapping into DTC.
634
         * the mapping into DTC.
629
         */
635
         */
630
        t->a = true;
636
        t->a = true;
631
        dtc_pte_copy(t);
637
        dtc_pte_copy(t);
632
    } else {
638
    } else {
633
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
639
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
634
            fault_if_from_uspace(istate,"Page fault at %P",va);
640
            fault_if_from_uspace(istate,"Page fault at %P",va);
635
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
641
            panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
636
            t->a = true;
642
            t->a = true;
637
            itc_pte_copy(t);
643
            itc_pte_copy(t);
638
        }
644
        }
639
    }
645
    }
640
    page_table_unlock(AS, true);
646
    page_table_unlock(AS, true);
641
}
647
}
642
 
648
 
643
/** Page not present fault handler.
649
/** Page not present fault handler.
644
 *
650
 *
645
 * @param vector Interruption vector.
651
 * @param vector Interruption vector.
646
 * @param istate Structure with saved interruption state.
652
 * @param istate Structure with saved interruption state.
647
 */
653
 */
648
void page_not_present(__u64 vector, istate_t *istate)
654
void page_not_present(__u64 vector, istate_t *istate)
649
{
655
{
650
    region_register rr;
656
    region_register rr;
651
    rid_t rid;
657
    rid_t rid;
652
    __address va;
658
    __address va;
653
    pte_t *t;
659
    pte_t *t;
654
   
660
   
655
    va = istate->cr_ifa;    /* faulting address */
661
    va = istate->cr_ifa;    /* faulting address */
656
    rr.word = rr_read(VA2VRN(va));
662
    rr.word = rr_read(VA2VRN(va));
657
    rid = rr.map.rid;
663
    rid = rr.map.rid;
658
 
664
 
659
    page_table_lock(AS, true);
665
    page_table_lock(AS, true);
660
    t = page_mapping_find(AS, va);
666
    t = page_mapping_find(AS, va);
661
    ASSERT(t);
667
    ASSERT(t);
662
   
668
   
663
    if (t->p) {
669
    if (t->p) {
664
        /*
670
        /*
665
         * If the Present bit is set in page hash table, just copy it
671
         * If the Present bit is set in page hash table, just copy it
666
         * and update ITC/DTC.
672
         * and update ITC/DTC.
667
         */
673
         */
668
        if (t->x)
674
        if (t->x)
669
            itc_pte_copy(t);
675
            itc_pte_copy(t);
670
        else
676
        else
671
            dtc_pte_copy(t);
677
            dtc_pte_copy(t);
672
        page_table_unlock(AS, true);
678
        page_table_unlock(AS, true);
673
    } else {
679
    } else {
674
        page_table_unlock(AS, true);
680
        page_table_unlock(AS, true);
675
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
681
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
676
            fault_if_from_uspace(istate,"Page fault at %P",va);
682
            fault_if_from_uspace(istate,"Page fault at %P",va);
677
            panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
683
            panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
678
        }
684
        }
679
    }
685
    }
680
}
686
}
/** @}
 */