Subversion Repositories HelenOS

Rev

Rev 3635 | Rev 3766 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 3635 Rev 3763
1
/*
1
/*
2
 * Copyright (c) 2006 Jakub Jermar
2
 * Copyright (c) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup ia64mm 
29
/** @addtogroup ia64mm 
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
/*
35
/*
36
 * TLB management.
36
 * TLB management.
37
 */
37
 */
38
 
38
 
39
#include <mm/tlb.h>
39
#include <mm/tlb.h>
40
#include <mm/asid.h>
40
#include <mm/asid.h>
41
#include <mm/page.h>
41
#include <mm/page.h>
42
#include <mm/as.h>
42
#include <mm/as.h>
43
#include <arch/mm/tlb.h>
43
#include <arch/mm/tlb.h>
44
#include <arch/mm/page.h>
44
#include <arch/mm/page.h>
45
#include <arch/mm/vhpt.h>
45
#include <arch/mm/vhpt.h>
46
#include <arch/barrier.h>
46
#include <arch/barrier.h>
47
#include <arch/interrupt.h>
47
#include <arch/interrupt.h>
48
#include <arch/pal/pal.h>
48
#include <arch/pal/pal.h>
49
#include <arch/asm.h>
49
#include <arch/asm.h>
50
#include <panic.h>
50
#include <panic.h>
51
#include <print.h>
51
#include <print.h>
52
#include <arch.h>
52
#include <arch.h>
53
#include <interrupt.h>
53
#include <interrupt.h>
54
 
54
 
55
/** Invalidate all TLB entries. */
55
/** Invalidate all TLB entries. */
56
void tlb_invalidate_all(void)
56
void tlb_invalidate_all(void)
57
{
57
{
58
    ipl_t ipl;
58
    ipl_t ipl;
59
    uintptr_t adr;
59
    uintptr_t adr;
60
    uint32_t count1, count2, stride1, stride2;
60
    uint32_t count1, count2, stride1, stride2;
61
       
61
       
62
    unsigned int i, j;
62
    unsigned int i, j;
63
       
63
       
64
    adr = PAL_PTCE_INFO_BASE();
64
    adr = PAL_PTCE_INFO_BASE();
65
    count1 = PAL_PTCE_INFO_COUNT1();
65
    count1 = PAL_PTCE_INFO_COUNT1();
66
    count2 = PAL_PTCE_INFO_COUNT2();
66
    count2 = PAL_PTCE_INFO_COUNT2();
67
    stride1 = PAL_PTCE_INFO_STRIDE1();
67
    stride1 = PAL_PTCE_INFO_STRIDE1();
68
    stride2 = PAL_PTCE_INFO_STRIDE2();
68
    stride2 = PAL_PTCE_INFO_STRIDE2();
69
       
69
       
70
    ipl = interrupts_disable();
70
    ipl = interrupts_disable();
71
 
71
 
72
    for (i = 0; i < count1; i++) {
72
    for (i = 0; i < count1; i++) {
73
        for (j = 0; j < count2; j++) {
73
        for (j = 0; j < count2; j++) {
74
            asm volatile (
74
            asm volatile (
75
                "ptc.e %0 ;;"
75
                "ptc.e %0 ;;"
76
                :
76
                :
77
                : "r" (adr)
77
                : "r" (adr)
78
            );
78
            );
79
            adr += stride2;
79
            adr += stride2;
80
        }
80
        }
81
        adr += stride1;
81
        adr += stride1;
82
    }
82
    }
83
 
83
 
84
    interrupts_restore(ipl);
84
    interrupts_restore(ipl);
85
 
85
 
86
    srlz_d();
86
    srlz_d();
87
    srlz_i();
87
    srlz_i();
88
#ifdef CONFIG_VHPT
88
#ifdef CONFIG_VHPT
89
    vhpt_invalidate_all();
89
    vhpt_invalidate_all();
90
#endif  
90
#endif  
91
}
91
}
92
 
92
 
93
/** Invalidate entries belonging to an address space.
93
/** Invalidate entries belonging to an address space.
94
 *
94
 *
95
 * @param asid Address space identifier.
95
 * @param asid Address space identifier.
96
 */
96
 */
97
void tlb_invalidate_asid(asid_t asid)
97
void tlb_invalidate_asid(asid_t asid)
98
{
98
{
99
    tlb_invalidate_all();
99
    tlb_invalidate_all();
100
}
100
}
101
 
101
 
102
 
102
 
103
/** Invalidate TLB entries for a range of pages.
 *
 * Purges the given range from the local TLB using ptc.l. The range is
 * covered with purges of a single hardware-supported page size chosen
 * from the number of pages to invalidate.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page to invalidate.
 * @param cnt Number of pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    region_register rr;
    bool restore_rr = false;
    int b = 0;
    int c = cnt;
    uint64_t ps;    /* log2 of the purge size, in bits */

    uintptr_t va;
    va = page;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    /* b = floor(log2(cnt)) / 2 selects the purge granularity below. */
    while (c >>= 1)
        b++;
    b >>= 1;

    /*
     * The shifts of 1 below must be done in 64 bits: ps can reach
     * PAGE_WIDTH + 18, for which a plain int (1 << ps) would shift by
     * the full width of the type or more — undefined behavior.
     */
    switch (b) {
    case 0: /* cnt 1 - 3 */
        ps = PAGE_WIDTH;
        break;
    case 1: /* cnt 4 - 15 */
        ps = PAGE_WIDTH + 2;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 2: /* cnt 16 - 63 */
        ps = PAGE_WIDTH + 4;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 3: /* cnt 64 - 255 */
        ps = PAGE_WIDTH + 6;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 4: /* cnt 256 - 1023 */
        ps = PAGE_WIDTH + 8;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 5: /* cnt 1024 - 4095 */
        ps = PAGE_WIDTH + 10;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 6: /* cnt 4096 - 16383 */
        ps = PAGE_WIDTH + 12;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 7: /* cnt 16384 - 65535 */
    case 8: /* cnt 65536 - (256K - 1) */
        ps = PAGE_WIDTH + 14;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    default:
        ps = PAGE_WIDTH + 18;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    }
    /* ptc.l takes the purge size as a bit width in bits 7:2. */
    for (; va < (page + cnt * PAGE_SIZE); va += ((uint64_t) 1 << ps)) {
        asm volatile (
            "ptc.l %0,%1;;"
            :
            : "r" (va), "r" (ps << 2)
        );
    }
    srlz_d();
    srlz_i();

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
196
 
187
 
197
/** Insert data into data translation cache.
188
/** Insert data into data translation cache.
198
 *
189
 *
199
 * @param va Virtual page address.
190
 * @param va Virtual page address.
200
 * @param asid Address space identifier.
191
 * @param asid Address space identifier.
201
 * @param entry The rest of TLB entry as required by TLB insertion format.
192
 * @param entry The rest of TLB entry as required by TLB insertion format.
202
 */
193
 */
203
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
194
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
204
{
195
{
205
    tc_mapping_insert(va, asid, entry, true);
196
    tc_mapping_insert(va, asid, entry, true);
206
}
197
}
207
 
198
 
208
/** Insert data into instruction translation cache.
199
/** Insert data into instruction translation cache.
209
 *
200
 *
210
 * @param va Virtual page address.
201
 * @param va Virtual page address.
211
 * @param asid Address space identifier.
202
 * @param asid Address space identifier.
212
 * @param entry The rest of TLB entry as required by TLB insertion format.
203
 * @param entry The rest of TLB entry as required by TLB insertion format.
213
 */
204
 */
214
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
205
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
215
{
206
{
216
    tc_mapping_insert(va, asid, entry, false);
207
    tc_mapping_insert(va, asid, entry, false);
217
}
208
}
218
 
209
 
219
/** Insert data into instruction or data translation cache.
 *
 * If the region register serving va does not already carry the RID
 * derived from asid, it is temporarily rewritten for the duration of
 * the insertion and restored afterwards.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    /*
     * The PSR bits in PSR_IC_MASK are cleared (rsm) around the insert
     * and the original PSR, saved in r8, is written back afterwards;
     * cr.ifa and cr.itir are loaded first as the itc instructions
     * take the remaining translation fields from them.
     */
    asm volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"            /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"       /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        /* Put back the saved region register content. */
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
269
 
260
 
270
/** Insert data into instruction translation register.
261
/** Insert data into instruction translation register.
271
 *
262
 *
272
 * @param va Virtual page address.
263
 * @param va Virtual page address.
273
 * @param asid Address space identifier.
264
 * @param asid Address space identifier.
274
 * @param entry The rest of TLB entry as required by TLB insertion format.
265
 * @param entry The rest of TLB entry as required by TLB insertion format.
275
 * @param tr Translation register.
266
 * @param tr Translation register.
276
 */
267
 */
277
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
268
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
278
{
269
{
279
    tr_mapping_insert(va, asid, entry, false, tr);
270
    tr_mapping_insert(va, asid, entry, false, tr);
280
}
271
}
281
 
272
 
282
/** Insert data into data translation register.
273
/** Insert data into data translation register.
283
 *
274
 *
284
 * @param va Virtual page address.
275
 * @param va Virtual page address.
285
 * @param asid Address space identifier.
276
 * @param asid Address space identifier.
286
 * @param entry The rest of TLB entry as required by TLB insertion format.
277
 * @param entry The rest of TLB entry as required by TLB insertion format.
287
 * @param tr Translation register.
278
 * @param tr Translation register.
288
 */
279
 */
289
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
280
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
290
{
281
{
291
    tr_mapping_insert(va, asid, entry, true, tr);
282
    tr_mapping_insert(va, asid, entry, true, tr);
292
}
283
}
293
 
284
 
294
/** Insert data into instruction or data translation register.
 *
 * If the region register serving va does not already carry the RID
 * derived from asid, it is temporarily rewritten for the duration of
 * the insertion and restored afterwards.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    /*
     * The PSR bits in PSR_IC_MASK are cleared (rsm) around the insert
     * and the original PSR, saved in r8, is written back afterwards;
     * cr.ifa and cr.itir are loaded first as the itr instructions
     * take the remaining translation fields from them.
     */
    asm volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"            /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        /* Put back the saved region register content. */
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
345
 
336
 
346
/** Insert data into DTLB.
337
/** Insert data into DTLB.
347
 *
338
 *
348
 * @param page Virtual page address including VRN bits.
339
 * @param page Virtual page address including VRN bits.
349
 * @param frame Physical frame address.
340
 * @param frame Physical frame address.
350
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
341
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
351
 * @param tr Translation register if dtr is true, ignored otherwise.
342
 * @param tr Translation register if dtr is true, ignored otherwise.
352
 */
343
 */
353
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
344
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
354
{
345
{
355
    tlb_entry_t entry;
346
    tlb_entry_t entry;
356
   
347
   
357
    entry.word[0] = 0;
348
    entry.word[0] = 0;
358
    entry.word[1] = 0;
349
    entry.word[1] = 0;
359
   
350
   
360
    entry.p = true;         /* present */
351
    entry.p = true;         /* present */
361
    entry.ma = MA_WRITEBACK;
352
    entry.ma = MA_WRITEBACK;
362
    entry.a = true;         /* already accessed */
353
    entry.a = true;         /* already accessed */
363
    entry.d = true;         /* already dirty */
354
    entry.d = true;         /* already dirty */
364
    entry.pl = PL_KERNEL;
355
    entry.pl = PL_KERNEL;
365
    entry.ar = AR_READ | AR_WRITE;
356
    entry.ar = AR_READ | AR_WRITE;
366
    entry.ppn = frame >> PPN_SHIFT;
357
    entry.ppn = frame >> PPN_SHIFT;
367
    entry.ps = PAGE_WIDTH;
358
    entry.ps = PAGE_WIDTH;
368
   
359
   
369
    if (dtr)
360
    if (dtr)
370
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
361
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
371
    else
362
    else
372
        dtc_mapping_insert(page, ASID_KERNEL, entry);
363
        dtc_mapping_insert(page, ASID_KERNEL, entry);
373
}
364
}
374
 
365
 
375
/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(uintptr_t page, count_t width)
{
    /* ptr.d takes the purge size as a bit width placed in bits 7:2. */
    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
}
386
 
377
 
387
 
378
 
388
/** Copy content of PTE into data translation cache.
379
/** Copy content of PTE into data translation cache.
389
 *
380
 *
390
 * @param t PTE.
381
 * @param t PTE.
391
 */
382
 */
392
void dtc_pte_copy(pte_t *t)
383
void dtc_pte_copy(pte_t *t)
393
{
384
{
394
    tlb_entry_t entry;
385
    tlb_entry_t entry;
395
 
386
 
396
    entry.word[0] = 0;
387
    entry.word[0] = 0;
397
    entry.word[1] = 0;
388
    entry.word[1] = 0;
398
   
389
   
399
    entry.p = t->p;
390
    entry.p = t->p;
400
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
391
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
401
    entry.a = t->a;
392
    entry.a = t->a;
402
    entry.d = t->d;
393
    entry.d = t->d;
403
    entry.pl = t->k ? PL_KERNEL : PL_USER;
394
    entry.pl = t->k ? PL_KERNEL : PL_USER;
404
    entry.ar = t->w ? AR_WRITE : AR_READ;
395
    entry.ar = t->w ? AR_WRITE : AR_READ;
405
    entry.ppn = t->frame >> PPN_SHIFT;
396
    entry.ppn = t->frame >> PPN_SHIFT;
406
    entry.ps = PAGE_WIDTH;
397
    entry.ps = PAGE_WIDTH;
407
   
398
   
408
    dtc_mapping_insert(t->page, t->as->asid, entry);
399
    dtc_mapping_insert(t->page, t->as->asid, entry);
409
#ifdef CONFIG_VHPT
400
#ifdef CONFIG_VHPT
410
    vhpt_mapping_insert(t->page, t->as->asid, entry);
401
    vhpt_mapping_insert(t->page, t->as->asid, entry);
411
#endif  
402
#endif  
412
}
403
}
413
 
404
 
414
/** Copy content of PTE into instruction translation cache.
405
/** Copy content of PTE into instruction translation cache.
415
 *
406
 *
416
 * @param t PTE.
407
 * @param t PTE.
417
 */
408
 */
418
void itc_pte_copy(pte_t *t)
409
void itc_pte_copy(pte_t *t)
419
{
410
{
420
    tlb_entry_t entry;
411
    tlb_entry_t entry;
421
 
412
 
422
    entry.word[0] = 0;
413
    entry.word[0] = 0;
423
    entry.word[1] = 0;
414
    entry.word[1] = 0;
424
   
415
   
425
    ASSERT(t->x);
416
    ASSERT(t->x);
426
   
417
   
427
    entry.p = t->p;
418
    entry.p = t->p;
428
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
419
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
429
    entry.a = t->a;
420
    entry.a = t->a;
430
    entry.pl = t->k ? PL_KERNEL : PL_USER;
421
    entry.pl = t->k ? PL_KERNEL : PL_USER;
431
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
422
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
432
    entry.ppn = t->frame >> PPN_SHIFT;
423
    entry.ppn = t->frame >> PPN_SHIFT;
433
    entry.ps = PAGE_WIDTH;
424
    entry.ps = PAGE_WIDTH;
434
   
425
   
435
    itc_mapping_insert(t->page, t->as->asid, entry);
426
    itc_mapping_insert(t->page, t->as->asid, entry);
436
#ifdef CONFIG_VHPT
427
#ifdef CONFIG_VHPT
437
    vhpt_mapping_insert(t->page, t->as->asid, entry);
428
    vhpt_mapping_insert(t->page, t->as->asid, entry);
438
#endif  
429
#endif  
439
}
430
}
440
 
431
 
441
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
{
    region_register rr;
    rid_t rid;
    uintptr_t va;
    pte_t *t;

    va = istate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in software page hash table.
         * Insert it into instruction translation cache.
         */
        itc_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            fault_if_from_uspace(istate,"Page fault at %p",va);
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
        }
    }
}
477
 
468
 
478
 
469
 
479
 
470
 
480
static int is_io_page_accessible(int page)
471
static int is_io_page_accessible(int page)
481
{
472
{
482
    if(TASK->arch.iomap) return bitmap_get(TASK->arch.iomap,page);
473
    if(TASK->arch.iomap) return bitmap_get(TASK->arch.iomap,page);
483
    else return 0;
474
    else return 0;
484
}
475
}
485
 
476
 
486
#define IO_FRAME_BASE 0xFFFFC000000
477
#define IO_FRAME_BASE 0xFFFFC000000
487
 
478
 
488
/** There is special handling of memmaped lagacy io, because
479
/** There is special handling of memmaped lagacy io, because
489
 * of 4KB sized access
480
 * of 4KB sized access
490
 * only for userspace
481
 * only for userspace
491
 *
482
 *
492
 * @param va virtual address of page fault
483
 * @param va virtual address of page fault
493
 * @param istate Structure with saved interruption state.
484
 * @param istate Structure with saved interruption state.
494
 *
485
 *
495
 *
486
 *
496
 * @return 1 on success, 0 on fail
487
 * @return 1 on success, 0 on fail
497
 */
488
 */
498
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
489
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
499
{
490
{
500
    if((va >= IO_OFFSET ) && (va < IO_OFFSET + (1<<IO_PAGE_WIDTH)))
491
    if((va >= IO_OFFSET ) && (va < IO_OFFSET + (1<<IO_PAGE_WIDTH)))
501
        if(TASK){
492
        if(TASK){
502
           
493
           
503
            uint64_t io_page=(va &  ((1<<IO_PAGE_WIDTH)-1)) >> (USPACE_IO_PAGE_WIDTH);
494
            uint64_t io_page=(va &  ((1<<IO_PAGE_WIDTH)-1)) >> (USPACE_IO_PAGE_WIDTH);
504
            if(is_io_page_accessible(io_page)){
495
            if(is_io_page_accessible(io_page)){
505
                //printf("Insert %llX\n",va);
-
 
506
 
-
 
507
                uint64_t page,frame;
496
                uint64_t page,frame;
508
 
497
 
509
                page = IO_OFFSET + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
498
                page = IO_OFFSET + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
510
                frame = IO_FRAME_BASE + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
499
                frame = IO_FRAME_BASE + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
511
 
500
 
512
 
501
 
513
                tlb_entry_t entry;
502
                tlb_entry_t entry;
514
   
503
   
515
                entry.word[0] = 0;
504
                entry.word[0] = 0;
516
                entry.word[1] = 0;
505
                entry.word[1] = 0;
517
   
506
   
518
                entry.p = true;         /* present */
507
                entry.p = true;         /* present */
519
                entry.ma = MA_UNCACHEABLE;     
508
                entry.ma = MA_UNCACHEABLE;     
520
                entry.a = true;         /* already accessed */
509
                entry.a = true;         /* already accessed */
521
                entry.d = true;         /* already dirty */
510
                entry.d = true;         /* already dirty */
522
                entry.pl = PL_USER;
511
                entry.pl = PL_USER;
523
                entry.ar = AR_READ | AR_WRITE;
512
                entry.ar = AR_READ | AR_WRITE;
524
                entry.ppn = frame >> PPN_SHIFT;    //MUSIM spocitat frame
513
                entry.ppn = frame >> PPN_SHIFT;
525
                entry.ps = USPACE_IO_PAGE_WIDTH;
514
                entry.ps = USPACE_IO_PAGE_WIDTH;
526
   
515
   
527
                dtc_mapping_insert(page, TASK->as->asid, entry); //Musim zjistit ASID
516
                dtc_mapping_insert(page, TASK->as->asid, entry);
528
                return 1;
517
                return 1;
529
            }else {
518
            }else {
530
                fault_if_from_uspace(istate,"IO access fault at %p",va);
519
                fault_if_from_uspace(istate,"IO access fault at %p",va);
531
                return 0;
520
                return 0;
532
            }      
521
            }      
533
        } else
522
        } else
534
            return 0;
523
            return 0;
535
    else
524
    else
536
        return 0;
525
        return 0;
537
       
526
       
538
    return 0;
527
    return 0;
539
 
528
 
540
}
529
}
541
 
530
 
542
 
531
 
543
 
532
 
544
 
533
 
545
/** Data TLB fault handler for faults with VHPT turned off.
534
/** Data TLB fault handler for faults with VHPT turned off.
546
 *
535
 *
547
 * @param vector Interruption vector.
536
 * @param vector Interruption vector.
548
 * @param istate Structure with saved interruption state.
537
 * @param istate Structure with saved interruption state.
549
 */
538
 */
550
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
539
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
551
{
540
{
552
    region_register rr;
541
    region_register rr;
553
    rid_t rid;
542
    rid_t rid;
554
    uintptr_t va;
543
    uintptr_t va;
555
    pte_t *t;
544
    pte_t *t;
556
   
545
   
557
    va = istate->cr_ifa;    /* faulting address */
546
    va = istate->cr_ifa;    /* faulting address */
558
    rr.word = rr_read(VA2VRN(va));
547
    rr.word = rr_read(VA2VRN(va));
559
    rid = rr.map.rid;
548
    rid = rr.map.rid;
560
    if (RID2ASID(rid) == ASID_KERNEL) {
549
    if (RID2ASID(rid) == ASID_KERNEL) {
561
        if (VA2VRN(va) == VRN_KERNEL) {
550
        if (VA2VRN(va) == VRN_KERNEL) {
562
            /*
551
            /*
563
             * Provide KA2PA(identity) mapping for faulting piece of
552
             * Provide KA2PA(identity) mapping for faulting piece of
564
             * kernel address space.
553
             * kernel address space.
565
             */
554
             */
566
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
555
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
567
            return;
556
            return;
568
        }
557
        }
569
    }
558
    }
570
 
559
 
571
    page_table_lock(AS, true);
560
    page_table_lock(AS, true);
572
    t = page_mapping_find(AS, va);
561
    t = page_mapping_find(AS, va);
573
    if (t) {
562
    if (t) {
574
        /*
563
        /*
575
         * The mapping was found in the software page hash table.
564
         * The mapping was found in the software page hash table.
576
         * Insert it into data translation cache.
565
         * Insert it into data translation cache.
577
         */
566
         */
578
        dtc_pte_copy(t);
567
        dtc_pte_copy(t);
579
        page_table_unlock(AS, true);
568
        page_table_unlock(AS, true);
580
    } else {
569
    } else {
581
        page_table_unlock(AS, true);
570
        page_table_unlock(AS, true);
582
        if (try_memmap_io_insertion(va,istate)) return;
571
        if (try_memmap_io_insertion(va,istate)) return;
583
        /*
572
        /*
584
         * Forward the page fault to the address space page fault handler.
573
         * Forward the page fault to the address space page fault handler.
585
         */
574
         */
586
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
575
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
587
            fault_if_from_uspace(istate,"Page fault at %p",va);
576
            fault_if_from_uspace(istate,"Page fault at %p",va);
588
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
577
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
589
        }
578
        }
590
    }
579
    }
591
}
580
}
592
 
581
 
593
/** Data nested TLB fault handler.
582
/** Data nested TLB fault handler.
594
 *
583
 *
595
 * This fault should not occur.
584
 * This fault should not occur.
596
 *
585
 *
597
 * @param vector Interruption vector.
586
 * @param vector Interruption vector.
598
 * @param istate Structure with saved interruption state.
587
 * @param istate Structure with saved interruption state.
599
 */
588
 */
600
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
589
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
601
{
590
{
602
    panic("%s\n", __func__);
591
    panic("%s\n", __func__);
603
}
592
}
604
 
593
 
605
/** Data Dirty bit fault handler.
594
/** Data Dirty bit fault handler.
606
 *
595
 *
607
 * @param vector Interruption vector.
596
 * @param vector Interruption vector.
608
 * @param istate Structure with saved interruption state.
597
 * @param istate Structure with saved interruption state.
609
 */
598
 */
610
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
599
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
611
{
600
{
612
    region_register rr;
601
    region_register rr;
613
    rid_t rid;
602
    rid_t rid;
614
    uintptr_t va;
603
    uintptr_t va;
615
    pte_t *t;
604
    pte_t *t;
616
   
605
   
617
    va = istate->cr_ifa;    /* faulting address */
606
    va = istate->cr_ifa;    /* faulting address */
618
    rr.word = rr_read(VA2VRN(va));
607
    rr.word = rr_read(VA2VRN(va));
619
    rid = rr.map.rid;
608
    rid = rr.map.rid;
620
 
609
 
621
    page_table_lock(AS, true);
610
    page_table_lock(AS, true);
622
    t = page_mapping_find(AS, va);
611
    t = page_mapping_find(AS, va);
623
    ASSERT(t && t->p);
612
    ASSERT(t && t->p);
624
    if (t && t->p && t->w) {
613
    if (t && t->p && t->w) {
625
        /*
614
        /*
626
         * Update the Dirty bit in page tables and reinsert
615
         * Update the Dirty bit in page tables and reinsert
627
         * the mapping into DTC.
616
         * the mapping into DTC.
628
         */
617
         */
629
        t->d = true;
618
        t->d = true;
630
        dtc_pte_copy(t);
619
        dtc_pte_copy(t);
631
    } else {
620
    } else {
632
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
621
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
633
            fault_if_from_uspace(istate,"Page fault at %p",va);
622
            fault_if_from_uspace(istate,"Page fault at %p",va);
634
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
623
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
635
            t->d = true;
624
            t->d = true;
636
            dtc_pte_copy(t);
625
            dtc_pte_copy(t);
637
        }
626
        }
638
    }
627
    }
639
    page_table_unlock(AS, true);
628
    page_table_unlock(AS, true);
640
}
629
}
641
 
630
 
642
/** Instruction access bit fault handler.
631
/** Instruction access bit fault handler.
643
 *
632
 *
644
 * @param vector Interruption vector.
633
 * @param vector Interruption vector.
645
 * @param istate Structure with saved interruption state.
634
 * @param istate Structure with saved interruption state.
646
 */
635
 */
647
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
636
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
648
{
637
{
649
    region_register rr;
638
    region_register rr;
650
    rid_t rid;
639
    rid_t rid;
651
    uintptr_t va;
640
    uintptr_t va;
652
    pte_t *t;  
641
    pte_t *t;  
653
 
642
 
654
    va = istate->cr_ifa;    /* faulting address */
643
    va = istate->cr_ifa;    /* faulting address */
655
    rr.word = rr_read(VA2VRN(va));
644
    rr.word = rr_read(VA2VRN(va));
656
    rid = rr.map.rid;
645
    rid = rr.map.rid;
657
 
646
 
658
    page_table_lock(AS, true);
647
    page_table_lock(AS, true);
659
    t = page_mapping_find(AS, va);
648
    t = page_mapping_find(AS, va);
660
    ASSERT(t && t->p);
649
    ASSERT(t && t->p);
661
    if (t && t->p && t->x) {
650
    if (t && t->p && t->x) {
662
        /*
651
        /*
663
         * Update the Accessed bit in page tables and reinsert
652
         * Update the Accessed bit in page tables and reinsert
664
         * the mapping into ITC.
653
         * the mapping into ITC.
665
         */
654
         */
666
        t->a = true;
655
        t->a = true;
667
        itc_pte_copy(t);
656
        itc_pte_copy(t);
668
    } else {
657
    } else {
669
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
658
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
670
            fault_if_from_uspace(istate,"Page fault at %p",va);
659
            fault_if_from_uspace(istate,"Page fault at %p",va);
671
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
660
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
672
            t->a = true;
661
            t->a = true;
673
            itc_pte_copy(t);
662
            itc_pte_copy(t);
674
        }
663
        }
675
    }
664
    }
676
    page_table_unlock(AS, true);
665
    page_table_unlock(AS, true);
677
}
666
}
678
 
667
 
679
/** Data access bit fault handler.
668
/** Data access bit fault handler.
680
 *
669
 *
681
 * @param vector Interruption vector.
670
 * @param vector Interruption vector.
682
 * @param istate Structure with saved interruption state.
671
 * @param istate Structure with saved interruption state.
683
 */
672
 */
684
void data_access_bit_fault(uint64_t vector, istate_t *istate)
673
void data_access_bit_fault(uint64_t vector, istate_t *istate)
685
{
674
{
686
    region_register rr;
675
    region_register rr;
687
    rid_t rid;
676
    rid_t rid;
688
    uintptr_t va;
677
    uintptr_t va;
689
    pte_t *t;
678
    pte_t *t;
690
 
679
 
691
    va = istate->cr_ifa;    /* faulting address */
680
    va = istate->cr_ifa;    /* faulting address */
692
    rr.word = rr_read(VA2VRN(va));
681
    rr.word = rr_read(VA2VRN(va));
693
    rid = rr.map.rid;
682
    rid = rr.map.rid;
694
 
683
 
695
    page_table_lock(AS, true);
684
    page_table_lock(AS, true);
696
    t = page_mapping_find(AS, va);
685
    t = page_mapping_find(AS, va);
697
    ASSERT(t && t->p);
686
    ASSERT(t && t->p);
698
    if (t && t->p) {
687
    if (t && t->p) {
699
        /*
688
        /*
700
         * Update the Accessed bit in page tables and reinsert
689
         * Update the Accessed bit in page tables and reinsert
701
         * the mapping into DTC.
690
         * the mapping into DTC.
702
         */
691
         */
703
        t->a = true;
692
        t->a = true;
704
        dtc_pte_copy(t);
693
        dtc_pte_copy(t);
705
    } else {
694
    } else {
706
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
695
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
707
            fault_if_from_uspace(istate,"Page fault at %p",va);
696
            fault_if_from_uspace(istate,"Page fault at %p",va);
708
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
697
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
709
            t->a = true;
698
            t->a = true;
710
            itc_pte_copy(t);
699
            itc_pte_copy(t);
711
        }
700
        }
712
    }
701
    }
713
    page_table_unlock(AS, true);
702
    page_table_unlock(AS, true);
714
}
703
}
715
 
704
 
716
/** Page not present fault handler.
705
/** Page not present fault handler.
717
 *
706
 *
718
 * @param vector Interruption vector.
707
 * @param vector Interruption vector.
719
 * @param istate Structure with saved interruption state.
708
 * @param istate Structure with saved interruption state.
720
 */
709
 */
721
void page_not_present(uint64_t vector, istate_t *istate)
710
void page_not_present(uint64_t vector, istate_t *istate)
722
{
711
{
723
    region_register rr;
712
    region_register rr;
724
    rid_t rid;
713
    rid_t rid;
725
    uintptr_t va;
714
    uintptr_t va;
726
    pte_t *t;
715
    pte_t *t;
727
   
716
   
728
    va = istate->cr_ifa;    /* faulting address */
717
    va = istate->cr_ifa;    /* faulting address */
729
    rr.word = rr_read(VA2VRN(va));
718
    rr.word = rr_read(VA2VRN(va));
730
    rid = rr.map.rid;
719
    rid = rr.map.rid;
731
 
720
 
732
    page_table_lock(AS, true);
721
    page_table_lock(AS, true);
733
    t = page_mapping_find(AS, va);
722
    t = page_mapping_find(AS, va);
734
    ASSERT(t);
723
    ASSERT(t);
735
   
724
   
736
    if (t->p) {
725
    if (t->p) {
737
        /*
726
        /*
738
         * If the Present bit is set in page hash table, just copy it
727
         * If the Present bit is set in page hash table, just copy it
739
         * and update ITC/DTC.
728
         * and update ITC/DTC.
740
         */
729
         */
741
        if (t->x)
730
        if (t->x)
742
            itc_pte_copy(t);
731
            itc_pte_copy(t);
743
        else
732
        else
744
            dtc_pte_copy(t);
733
            dtc_pte_copy(t);
745
        page_table_unlock(AS, true);
734
        page_table_unlock(AS, true);
746
    } else {
735
    } else {
747
        page_table_unlock(AS, true);
736
        page_table_unlock(AS, true);
748
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
737
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
749
            fault_if_from_uspace(istate,"Page fault at %p",va);
738
            fault_if_from_uspace(istate,"Page fault at %p",va);
750
            panic("%s: va=%p, rid=%d\n", __func__, va, rid);
739
            panic("%s: va=%p, rid=%d\n", __func__, va, rid);
751
        }
740
        }
752
    }
741
    }
753
}
742
}
754
 
743
 
755
/** @}
744
/** @}
756
 */
745
 */
757
 
746