Subversion Repositories HelenOS

Rev

Rev 3674 | Rev 4339 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 3674 Rev 4338
1
/*
1
/*
2
 * Copyright (c) 2006 Jakub Jermar
2
 * Copyright (c) 2006 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
/** @addtogroup ia64mm 
29
/** @addtogroup ia64mm 
30
 * @{
30
 * @{
31
 */
31
 */
32
/** @file
32
/** @file
33
 */
33
 */
34
 
34
 
35
/*
35
/*
36
 * TLB management.
36
 * TLB management.
37
 */
37
 */
38
 
38
 
39
#include <mm/tlb.h>
39
#include <mm/tlb.h>
40
#include <mm/asid.h>
40
#include <mm/asid.h>
41
#include <mm/page.h>
41
#include <mm/page.h>
42
#include <mm/as.h>
42
#include <mm/as.h>
43
#include <arch/mm/tlb.h>
43
#include <arch/mm/tlb.h>
44
#include <arch/mm/page.h>
44
#include <arch/mm/page.h>
45
#include <arch/mm/vhpt.h>
45
#include <arch/mm/vhpt.h>
46
#include <arch/barrier.h>
46
#include <arch/barrier.h>
47
#include <arch/interrupt.h>
47
#include <arch/interrupt.h>
48
#include <arch/pal/pal.h>
48
#include <arch/pal/pal.h>
49
#include <arch/asm.h>
49
#include <arch/asm.h>
50
#include <panic.h>
50
#include <panic.h>
51
#include <print.h>
51
#include <print.h>
52
#include <arch.h>
52
#include <arch.h>
53
#include <interrupt.h>
53
#include <interrupt.h>
54
 
54
 
55
/** Invalidate all TLB entries. */
55
/** Invalidate all TLB entries. */
56
void tlb_invalidate_all(void)
56
void tlb_invalidate_all(void)
57
{
57
{
58
    ipl_t ipl;
58
    ipl_t ipl;
59
    uintptr_t adr;
59
    uintptr_t adr;
60
    uint32_t count1, count2, stride1, stride2;
60
    uint32_t count1, count2, stride1, stride2;
61
       
61
       
62
    unsigned int i, j;
62
    unsigned int i, j;
63
       
63
       
64
    adr = PAL_PTCE_INFO_BASE();
64
    adr = PAL_PTCE_INFO_BASE();
65
    count1 = PAL_PTCE_INFO_COUNT1();
65
    count1 = PAL_PTCE_INFO_COUNT1();
66
    count2 = PAL_PTCE_INFO_COUNT2();
66
    count2 = PAL_PTCE_INFO_COUNT2();
67
    stride1 = PAL_PTCE_INFO_STRIDE1();
67
    stride1 = PAL_PTCE_INFO_STRIDE1();
68
    stride2 = PAL_PTCE_INFO_STRIDE2();
68
    stride2 = PAL_PTCE_INFO_STRIDE2();
69
       
69
       
70
    ipl = interrupts_disable();
70
    ipl = interrupts_disable();
71
 
71
 
72
    for (i = 0; i < count1; i++) {
72
    for (i = 0; i < count1; i++) {
73
        for (j = 0; j < count2; j++) {
73
        for (j = 0; j < count2; j++) {
74
            asm volatile (
74
            asm volatile (
75
                "ptc.e %0 ;;"
75
                "ptc.e %0 ;;"
76
                :
76
                :
77
                : "r" (adr)
77
                : "r" (adr)
78
            );
78
            );
79
            adr += stride2;
79
            adr += stride2;
80
        }
80
        }
81
        adr += stride1;
81
        adr += stride1;
82
    }
82
    }
83
 
83
 
84
    interrupts_restore(ipl);
84
    interrupts_restore(ipl);
85
 
85
 
86
    srlz_d();
86
    srlz_d();
87
    srlz_i();
87
    srlz_i();
88
#ifdef CONFIG_VHPT
88
#ifdef CONFIG_VHPT
89
    vhpt_invalidate_all();
89
    vhpt_invalidate_all();
90
#endif  
90
#endif  
91
}
91
}
92
 
92
 
93
/** Invalidate entries belonging to an address space.
93
/** Invalidate entries belonging to an address space.
94
 *
94
 *
95
 * @param asid Address space identifier.
95
 * @param asid      Address space identifier.
96
 */
96
 */
97
void tlb_invalidate_asid(asid_t asid)
97
void tlb_invalidate_asid(asid_t asid)
98
{
98
{
99
    tlb_invalidate_all();
99
    tlb_invalidate_all();
100
}
100
}
101
 
101
 
102
 
102
 
103
/** Invalidate TLB entries for a range of pages belonging to an address space.
 *
 * The purge size class is derived from cnt; the base address is aligned
 * down to that size and ptc.l is issued for each chunk in the range.
 *
 * @param asid      Address space identifier.
 * @param page      First page whose TLB entries are to be invalidated.
 * @param cnt       Number of pages.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    region_register rr;
    bool restore_rr = false;
    int b = 0;
    /* Keep the full count_t width; the original 'int c = cnt' silently
     * truncated and could misclassify very large counts. */
    count_t c = cnt;
    uintptr_t va;
    uint64_t ps;

    va = page;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    /* b = floor(log2(cnt)) / 2 selects the purge size class below. */
    while (c >>= 1)
        b++;
    b >>= 1;

    /*
     * NOTE: all shifts below are done in 64 bits. With PAGE_WIDTH == 14,
     * ps can reach 32, so the original '1 << ps' (a 32-bit int shift)
     * was undefined behavior for the larger classes.
     */
    switch (b) {
    case 0: /* cnt 1 - 3 */
        ps = PAGE_WIDTH;
        break;
    case 1: /* cnt 4 - 15 */
        ps = PAGE_WIDTH + 2;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 2: /* cnt 16 - 63 */
        ps = PAGE_WIDTH + 4;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 3: /* cnt 64 - 255 */
        ps = PAGE_WIDTH + 6;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 4: /* cnt 256 - 1023 */
        ps = PAGE_WIDTH + 8;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 5: /* cnt 1024 - 4095 */
        ps = PAGE_WIDTH + 10;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 6: /* cnt 4096 - 16383 */
        ps = PAGE_WIDTH + 12;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    case 7: /* cnt 16384 - 65535 */
    case 8: /* cnt 65536 - (256K - 1) */
        ps = PAGE_WIDTH + 14;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    default:
        ps = PAGE_WIDTH + 18;
        va &= ~(((uint64_t) 1 << ps) - 1);
        break;
    }

    for (; va < (page + cnt * PAGE_SIZE); va += ((uint64_t) 1 << ps))
        asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2));

    srlz_d();
    srlz_i();

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
196
 
182
 
197
/** Insert data into data translation cache.
183
/** Insert data into data translation cache.
198
 *
184
 *
199
 * @param va Virtual page address.
185
 * @param va        Virtual page address.
200
 * @param asid Address space identifier.
186
 * @param asid      Address space identifier.
201
 * @param entry The rest of TLB entry as required by TLB insertion format.
187
 * @param entry     The rest of TLB entry as required by TLB insertion
-
 
188
 *          format.
202
 */
189
 */
203
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
190
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
204
{
191
{
205
    tc_mapping_insert(va, asid, entry, true);
192
    tc_mapping_insert(va, asid, entry, true);
206
}
193
}
207
 
194
 
208
/** Insert data into instruction translation cache.
195
/** Insert data into instruction translation cache.
209
 *
196
 *
210
 * @param va Virtual page address.
197
 * @param va        Virtual page address.
211
 * @param asid Address space identifier.
198
 * @param asid      Address space identifier.
212
 * @param entry The rest of TLB entry as required by TLB insertion format.
199
 * @param entry     The rest of TLB entry as required by TLB insertion
-
 
200
 *          format.
213
 */
201
 */
214
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
202
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
215
{
203
{
216
    tc_mapping_insert(va, asid, entry, false);
204
    tc_mapping_insert(va, asid, entry, false);
217
}
205
}
218
 
206
 
219
/** Insert data into instruction or data translation cache.
207
/** Insert data into instruction or data translation cache.
220
 *
208
 *
221
 * @param va Virtual page address.
209
 * @param va        Virtual page address.
222
 * @param asid Address space identifier.
210
 * @param asid      Address space identifier.
223
 * @param entry The rest of TLB entry as required by TLB insertion format.
211
 * @param entry     The rest of TLB entry as required by TLB insertion
-
 
212
 *          format.
224
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
213
 * @param dtc       If true, insert into data translation cache, use
-
 
214
 *          instruction translation cache otherwise.
225
 */
215
 */
226
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
216
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
227
{
217
{
228
    region_register rr;
218
    region_register rr;
229
    bool restore_rr = false;
219
    bool restore_rr = false;
230
 
220
 
231
    rr.word = rr_read(VA2VRN(va));
221
    rr.word = rr_read(VA2VRN(va));
232
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
222
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
233
        /*
223
        /*
234
         * The selected region register does not contain required RID.
224
         * The selected region register does not contain required RID.
235
         * Save the old content of the register and replace the RID.
225
         * Save the old content of the register and replace the RID.
236
         */
226
         */
237
        region_register rr0;
227
        region_register rr0;
238
 
228
 
239
        rr0 = rr;
229
        rr0 = rr;
240
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
230
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
241
        rr_write(VA2VRN(va), rr0.word);
231
        rr_write(VA2VRN(va), rr0.word);
242
        srlz_d();
232
        srlz_d();
243
        srlz_i();
233
        srlz_i();
244
    }
234
    }
245
   
235
   
246
    asm volatile (
236
    asm volatile (
247
        "mov r8=psr;;\n"
237
        "mov r8 = psr;;\n"
248
        "rsm %0;;\n"            /* PSR_IC_MASK */
238
        "rsm %0;;\n"            /* PSR_IC_MASK */
249
        "srlz.d;;\n"
239
        "srlz.d;;\n"
250
        "srlz.i;;\n"
240
        "srlz.i;;\n"
251
        "mov cr.ifa=%1\n"       /* va */
241
        "mov cr.ifa = %1\n"     /* va */
252
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
242
        "mov cr.itir = %2;;\n"      /* entry.word[1] */
253
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
243
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
254
        "(p6) itc.i %3;;\n"
244
        "(p6) itc.i %3;;\n"
255
        "(p7) itc.d %3;;\n"
245
        "(p7) itc.d %3;;\n"
256
        "mov psr.l=r8;;\n"
246
        "mov psr.l = r8;;\n"
257
        "srlz.d;;\n"
247
        "srlz.d;;\n"
258
        :
248
        :
259
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
249
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
-
 
250
            "r" (entry.word[0]), "r" (dtc)
260
        : "p6", "p7", "r8"
251
        : "p6", "p7", "r8"
261
    );
252
    );
262
   
253
   
263
    if (restore_rr) {
254
    if (restore_rr) {
264
        rr_write(VA2VRN(va), rr.word);
255
        rr_write(VA2VRN(va), rr.word);
265
        srlz_d();
256
        srlz_d();
266
        srlz_i();
257
        srlz_i();
267
    }
258
    }
268
}
259
}
269
 
260
 
270
/** Insert data into instruction translation register.
261
/** Insert data into instruction translation register.
271
 *
262
 *
272
 * @param va Virtual page address.
263
 * @param va        Virtual page address.
273
 * @param asid Address space identifier.
264
 * @param asid      Address space identifier.
274
 * @param entry The rest of TLB entry as required by TLB insertion format.
265
 * @param entry     The rest of TLB entry as required by TLB insertion
-
 
266
 *          format.
275
 * @param tr Translation register.
267
 * @param tr        Translation register.
276
 */
268
 */
-
 
269
void
277
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
270
itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
278
{
271
{
279
    tr_mapping_insert(va, asid, entry, false, tr);
272
    tr_mapping_insert(va, asid, entry, false, tr);
280
}
273
}
281
 
274
 
282
/** Insert data into data translation register.
275
/** Insert data into data translation register.
283
 *
276
 *
284
 * @param va Virtual page address.
277
 * @param va        Virtual page address.
285
 * @param asid Address space identifier.
278
 * @param asid      Address space identifier.
286
 * @param entry The rest of TLB entry as required by TLB insertion format.
279
 * @param entry     The rest of TLB entry as required by TLB insertion
-
 
280
 *          format.
287
 * @param tr Translation register.
281
 * @param tr        Translation register.
288
 */
282
 */
-
 
283
void
289
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
284
dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
290
{
285
{
291
    tr_mapping_insert(va, asid, entry, true, tr);
286
    tr_mapping_insert(va, asid, entry, true, tr);
292
}
287
}
293
 
288
 
294
/** Insert data into instruction or data translation register.
289
/** Insert data into instruction or data translation register.
295
 *
290
 *
296
 * @param va Virtual page address.
291
 * @param va        Virtual page address.
297
 * @param asid Address space identifier.
292
 * @param asid      Address space identifier.
298
 * @param entry The rest of TLB entry as required by TLB insertion format.
293
 * @param entry     The rest of TLB entry as required by TLB insertion
-
 
294
 *          format.
299
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
295
 * @param dtr       If true, insert into data translation register, use
-
 
296
 *          instruction translation register otherwise.
300
 * @param tr Translation register.
297
 * @param tr        Translation register.
301
 */
298
 */
-
 
299
void
302
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
300
tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
-
 
301
    index_t tr)
303
{
302
{
304
    region_register rr;
303
    region_register rr;
305
    bool restore_rr = false;
304
    bool restore_rr = false;
306
 
305
 
307
    rr.word = rr_read(VA2VRN(va));
306
    rr.word = rr_read(VA2VRN(va));
308
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
307
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
309
        /*
308
        /*
310
         * The selected region register does not contain required RID.
309
         * The selected region register does not contain required RID.
311
         * Save the old content of the register and replace the RID.
310
         * Save the old content of the register and replace the RID.
312
         */
311
         */
313
        region_register rr0;
312
        region_register rr0;
314
 
313
 
315
        rr0 = rr;
314
        rr0 = rr;
316
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
315
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
317
        rr_write(VA2VRN(va), rr0.word);
316
        rr_write(VA2VRN(va), rr0.word);
318
        srlz_d();
317
        srlz_d();
319
        srlz_i();
318
        srlz_i();
320
    }
319
    }
321
 
320
 
322
    asm volatile (
321
    asm volatile (
323
        "mov r8=psr;;\n"
322
        "mov r8 = psr;;\n"
324
        "rsm %0;;\n"            /* PSR_IC_MASK */
323
        "rsm %0;;\n"            /* PSR_IC_MASK */
325
        "srlz.d;;\n"
324
        "srlz.d;;\n"
326
        "srlz.i;;\n"
325
        "srlz.i;;\n"
327
        "mov cr.ifa=%1\n"           /* va */         
326
        "mov cr.ifa = %1\n"         /* va */         
328
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
327
        "mov cr.itir = %2;;\n"      /* entry.word[1] */
329
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
328
        "cmp.eq p6,p7 = %5,r0;;\n"  /* decide between itr and dtr */
330
        "(p6) itr.i itr[%4]=%3;;\n"
329
        "(p6) itr.i itr[%4] = %3;;\n"
331
        "(p7) itr.d dtr[%4]=%3;;\n"
330
        "(p7) itr.d dtr[%4] = %3;;\n"
332
        "mov psr.l=r8;;\n"
331
        "mov psr.l = r8;;\n"
333
        "srlz.d;;\n"
332
        "srlz.d;;\n"
334
        :
333
        :
335
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
334
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
-
 
335
            "r" (entry.word[0]), "r" (tr), "r" (dtr)
336
        : "p6", "p7", "r8"
336
        : "p6", "p7", "r8"
337
    );
337
    );
338
   
338
   
339
    if (restore_rr) {
339
    if (restore_rr) {
340
        rr_write(VA2VRN(va), rr.word);
340
        rr_write(VA2VRN(va), rr.word);
341
        srlz_d();
341
        srlz_d();
342
        srlz_i();
342
        srlz_i();
343
    }
343
    }
344
}
344
}
345
 
345
 
346
/** Insert data into DTLB.
346
/** Insert data into DTLB.
347
 *
347
 *
348
 * @param page Virtual page address including VRN bits.
348
 * @param page      Virtual page address including VRN bits.
349
 * @param frame Physical frame address.
349
 * @param frame     Physical frame address.
350
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
350
 * @param dtr       If true, insert into data translation register, use data
-
 
351
 *          translation cache otherwise.
351
 * @param tr Translation register if dtr is true, ignored otherwise.
352
 * @param tr        Translation register if dtr is true, ignored otherwise.
352
 */
353
 */
-
 
354
void
353
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
355
dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
-
 
356
    index_t tr)
354
{
357
{
355
    tlb_entry_t entry;
358
    tlb_entry_t entry;
356
   
359
   
357
    entry.word[0] = 0;
360
    entry.word[0] = 0;
358
    entry.word[1] = 0;
361
    entry.word[1] = 0;
359
   
362
   
360
    entry.p = true;         /* present */
363
    entry.p = true;         /* present */
361
    entry.ma = MA_WRITEBACK;
364
    entry.ma = MA_WRITEBACK;
362
    entry.a = true;         /* already accessed */
365
    entry.a = true;         /* already accessed */
363
    entry.d = true;         /* already dirty */
366
    entry.d = true;         /* already dirty */
364
    entry.pl = PL_KERNEL;
367
    entry.pl = PL_KERNEL;
365
    entry.ar = AR_READ | AR_WRITE;
368
    entry.ar = AR_READ | AR_WRITE;
366
    entry.ppn = frame >> PPN_SHIFT;
369
    entry.ppn = frame >> PPN_SHIFT;
367
    entry.ps = PAGE_WIDTH;
370
    entry.ps = PAGE_WIDTH;
368
   
371
   
369
    if (dtr)
372
    if (dtr)
370
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
373
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
371
    else
374
    else
372
        dtc_mapping_insert(page, ASID_KERNEL, entry);
375
        dtc_mapping_insert(page, ASID_KERNEL, entry);
373
}
376
}
374
 
377
 
375
/** Purge kernel entries from DTR.
378
/** Purge kernel entries from DTR.
376
 *
379
 *
377
 * Purge DTR entries used by the kernel.
380
 * Purge DTR entries used by the kernel.
378
 *
381
 *
379
 * @param page Virtual page address including VRN bits.
382
 * @param page      Virtual page address including VRN bits.
380
 * @param width Width of the purge in bits.
383
 * @param width     Width of the purge in bits.
381
 */
384
 */
382
void dtr_purge(uintptr_t page, count_t width)
385
void dtr_purge(uintptr_t page, count_t width)
383
{
386
{
384
    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
387
    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
385
}
388
}
386
 
389
 
387
 
390
 
388
/** Copy content of PTE into data translation cache.
391
/** Copy content of PTE into data translation cache.
389
 *
392
 *
390
 * @param t PTE.
393
 * @param t     PTE.
391
 */
394
 */
392
void dtc_pte_copy(pte_t *t)
395
void dtc_pte_copy(pte_t *t)
393
{
396
{
394
    tlb_entry_t entry;
397
    tlb_entry_t entry;
395
 
398
 
396
    entry.word[0] = 0;
399
    entry.word[0] = 0;
397
    entry.word[1] = 0;
400
    entry.word[1] = 0;
398
   
401
   
399
    entry.p = t->p;
402
    entry.p = t->p;
400
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
403
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
401
    entry.a = t->a;
404
    entry.a = t->a;
402
    entry.d = t->d;
405
    entry.d = t->d;
403
    entry.pl = t->k ? PL_KERNEL : PL_USER;
406
    entry.pl = t->k ? PL_KERNEL : PL_USER;
404
    entry.ar = t->w ? AR_WRITE : AR_READ;
407
    entry.ar = t->w ? AR_WRITE : AR_READ;
405
    entry.ppn = t->frame >> PPN_SHIFT;
408
    entry.ppn = t->frame >> PPN_SHIFT;
406
    entry.ps = PAGE_WIDTH;
409
    entry.ps = PAGE_WIDTH;
407
   
410
   
408
    dtc_mapping_insert(t->page, t->as->asid, entry);
411
    dtc_mapping_insert(t->page, t->as->asid, entry);
409
#ifdef CONFIG_VHPT
412
#ifdef CONFIG_VHPT
410
    vhpt_mapping_insert(t->page, t->as->asid, entry);
413
    vhpt_mapping_insert(t->page, t->as->asid, entry);
411
#endif  
414
#endif  
412
}
415
}
413
 
416
 
414
/** Copy content of PTE into instruction translation cache.
417
/** Copy content of PTE into instruction translation cache.
415
 *
418
 *
416
 * @param t PTE.
419
 * @param t     PTE.
417
 */
420
 */
418
void itc_pte_copy(pte_t *t)
421
void itc_pte_copy(pte_t *t)
419
{
422
{
420
    tlb_entry_t entry;
423
    tlb_entry_t entry;
421
 
424
 
422
    entry.word[0] = 0;
425
    entry.word[0] = 0;
423
    entry.word[1] = 0;
426
    entry.word[1] = 0;
424
   
427
   
425
    ASSERT(t->x);
428
    ASSERT(t->x);
426
   
429
   
427
    entry.p = t->p;
430
    entry.p = t->p;
428
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
431
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
429
    entry.a = t->a;
432
    entry.a = t->a;
430
    entry.pl = t->k ? PL_KERNEL : PL_USER;
433
    entry.pl = t->k ? PL_KERNEL : PL_USER;
431
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
434
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
432
    entry.ppn = t->frame >> PPN_SHIFT;
435
    entry.ppn = t->frame >> PPN_SHIFT;
433
    entry.ps = PAGE_WIDTH;
436
    entry.ps = PAGE_WIDTH;
434
   
437
   
435
    itc_mapping_insert(t->page, t->as->asid, entry);
438
    itc_mapping_insert(t->page, t->as->asid, entry);
436
#ifdef CONFIG_VHPT
439
#ifdef CONFIG_VHPT
437
    vhpt_mapping_insert(t->page, t->as->asid, entry);
440
    vhpt_mapping_insert(t->page, t->as->asid, entry);
438
#endif  
441
#endif  
439
}
442
}
440
 
443
 
441
/** Instruction TLB fault handler for faults with VHPT turned off.
444
/** Instruction TLB fault handler for faults with VHPT turned off.
442
 *
445
 *
443
 * @param vector Interruption vector.
446
 * @param vector        Interruption vector.
444
 * @param istate Structure with saved interruption state.
447
 * @param istate        Structure with saved interruption state.
445
 */
448
 */
446
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
449
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
447
{
450
{
448
    region_register rr;
451
    region_register rr;
449
    rid_t rid;
452
    rid_t rid;
450
    uintptr_t va;
453
    uintptr_t va;
451
    pte_t *t;
454
    pte_t *t;
452
   
455
   
453
    va = istate->cr_ifa;    /* faulting address */
456
    va = istate->cr_ifa;    /* faulting address */
454
    rr.word = rr_read(VA2VRN(va));
457
    rr.word = rr_read(VA2VRN(va));
455
    rid = rr.map.rid;
458
    rid = rr.map.rid;
456
 
459
 
457
    page_table_lock(AS, true);
460
    page_table_lock(AS, true);
458
    t = page_mapping_find(AS, va);
461
    t = page_mapping_find(AS, va);
459
    if (t) {
462
    if (t) {
460
        /*
463
        /*
461
         * The mapping was found in software page hash table.
464
         * The mapping was found in software page hash table.
462
         * Insert it into data translation cache.
465
         * Insert it into data translation cache.
463
         */
466
         */
464
        itc_pte_copy(t);
467
        itc_pte_copy(t);
465
        page_table_unlock(AS, true);
468
        page_table_unlock(AS, true);
466
    } else {
469
    } else {
467
        /*
470
        /*
468
         * Forward the page fault to address space page fault handler.
471
         * Forward the page fault to address space page fault handler.
469
         */
472
         */
470
        page_table_unlock(AS, true);
473
        page_table_unlock(AS, true);
471
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
474
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
472
            fault_if_from_uspace(istate,"Page fault at %p",va);
475
            fault_if_from_uspace(istate,"Page fault at %p",va);
473
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
476
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
-
 
477
                istate->cr_iip);
474
        }
478
        }
475
    }
479
    }
476
}
480
}
477
 
481
 
478
 
-
 
479
 
-
 
480
static int is_io_page_accessible(int page)
482
static int is_io_page_accessible(int page)
481
{
483
{
-
 
484
    if (TASK->arch.iomap)
482
    if(TASK->arch.iomap) return bitmap_get(TASK->arch.iomap,page);
485
        return bitmap_get(TASK->arch.iomap, page);
-
 
486
    else
483
    else return 0;
487
        return 0;
484
}
488
}
485
 
489
 
486
#define IO_FRAME_BASE 0xFFFFC000000
490
#define IO_FRAME_BASE 0xFFFFC000000
487
 
491
 
-
 
492
/**
488
/** There is special handling of memmaped lagacy io, because
493
 * There is special handling of memory mapped legacy io, because of 4KB sized
489
 * of 4KB sized access
-
 
490
 * only for userspace
494
 * access for userspace.
491
 *
-
 
492
 * @param va virtual address of page fault
-
 
493
 * @param istate Structure with saved interruption state.
-
 
494
 *
495
 *
-
 
496
 * @param va        Virtual address of page fault.
-
 
497
 * @param istate    Structure with saved interruption state.
495
 *
498
 *
496
 * @return 1 on success, 0 on fail
499
 * @return      One on success, zero on failure.
497
 */
500
 */
498
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
501
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
499
{
502
{
500
    if((va >= IO_OFFSET ) && (va < IO_OFFSET + (1<<IO_PAGE_WIDTH)))
503
    if ((va >= IO_OFFSET ) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
501
        if(TASK){
504
        if (TASK) {
502
           
-
 
503
            uint64_t io_page=(va &  ((1<<IO_PAGE_WIDTH)-1)) >> (USPACE_IO_PAGE_WIDTH);
505
            uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
504
            if(is_io_page_accessible(io_page)){
-
 
505
                //printf("Insert %llX\n",va);
506
                USPACE_IO_PAGE_WIDTH;
506
 
507
 
-
 
508
            if (is_io_page_accessible(io_page)) {
507
                uint64_t page,frame;
509
                uint64_t page, frame;
508
 
510
 
-
 
511
                page = IO_OFFSET +
509
                page = IO_OFFSET + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
512
                    (1 << USPACE_IO_PAGE_WIDTH) * io_page;
-
 
513
                frame = IO_FRAME_BASE +
510
                frame = IO_FRAME_BASE + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
514
                    (1 << USPACE_IO_PAGE_WIDTH) * io_page;
511
 
-
 
512
 
515
 
513
                tlb_entry_t entry;
516
                tlb_entry_t entry;
514
   
517
   
515
                entry.word[0] = 0;
518
                entry.word[0] = 0;
516
                entry.word[1] = 0;
519
                entry.word[1] = 0;
517
   
520
   
518
                entry.p = true;         /* present */
521
                entry.p = true;     /* present */
519
                entry.ma = MA_UNCACHEABLE;     
522
                entry.ma = MA_UNCACHEABLE;     
520
                entry.a = true;         /* already accessed */
523
                entry.a = true;     /* already accessed */
521
                entry.d = true;         /* already dirty */
524
                entry.d = true;     /* already dirty */
522
                entry.pl = PL_USER;
525
                entry.pl = PL_USER;
523
                entry.ar = AR_READ | AR_WRITE;
526
                entry.ar = AR_READ | AR_WRITE;
524
                entry.ppn = frame >> PPN_SHIFT;    //MUSIM spocitat frame
527
                entry.ppn = frame >> PPN_SHIFT;
525
                entry.ps = USPACE_IO_PAGE_WIDTH;
528
                entry.ps = USPACE_IO_PAGE_WIDTH;
526
   
529
   
527
                dtc_mapping_insert(page, TASK->as->asid, entry); //Musim zjistit ASID
530
                dtc_mapping_insert(page, TASK->as->asid, entry);
528
                return 1;
531
                return 1;
529
            }else {
532
            } else {
530
                fault_if_from_uspace(istate,"IO access fault at %p",va);
533
                fault_if_from_uspace(istate,
531
                return 0;
534
                    "IO access fault at %p", va);
532
            }      
535
            }
533
        } else
536
        }
534
            return 0;
-
 
535
    else
537
    }
536
        return 0;
-
 
537
       
538
       
538
    return 0;
539
    return 0;
539
 
-
 
540
}
540
}
541
 
541
 
542
 
-
 
543
 
-
 
544
 
-
 
545
/** Data TLB fault handler for faults with VHPT turned off.
542
/** Data TLB fault handler for faults with VHPT turned off.
546
 *
543
 *
547
 * @param vector Interruption vector.
544
 * @param vector    Interruption vector.
548
 * @param istate Structure with saved interruption state.
545
 * @param istate    Structure with saved interruption state.
549
 */
546
 */
550
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
547
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
551
{
548
{
552
    region_register rr;
549
    region_register rr;
553
    rid_t rid;
550
    rid_t rid;
554
    uintptr_t va;
551
    uintptr_t va;
555
    pte_t *t;
552
    pte_t *t;
556
   
553
   
557
    va = istate->cr_ifa;    /* faulting address */
554
    va = istate->cr_ifa;    /* faulting address */
558
    rr.word = rr_read(VA2VRN(va));
555
    rr.word = rr_read(VA2VRN(va));
559
    rid = rr.map.rid;
556
    rid = rr.map.rid;
560
    if (RID2ASID(rid) == ASID_KERNEL) {
557
    if (RID2ASID(rid) == ASID_KERNEL) {
561
        if (VA2VRN(va) == VRN_KERNEL) {
558
        if (VA2VRN(va) == VRN_KERNEL) {
562
            /*
559
            /*
563
             * Provide KA2PA(identity) mapping for faulting piece of
560
             * Provide KA2PA(identity) mapping for faulting piece of
564
             * kernel address space.
561
             * kernel address space.
565
             */
562
             */
566
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
563
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
567
            return;
564
            return;
568
        }
565
        }
569
    }
566
    }
570
 
567
 
571
    page_table_lock(AS, true);
568
    page_table_lock(AS, true);
572
    t = page_mapping_find(AS, va);
569
    t = page_mapping_find(AS, va);
573
    if (t) {
570
    if (t) {
574
        /*
571
        /*
575
         * The mapping was found in the software page hash table.
572
         * The mapping was found in the software page hash table.
576
         * Insert it into data translation cache.
573
         * Insert it into data translation cache.
577
         */
574
         */
578
        dtc_pte_copy(t);
575
        dtc_pte_copy(t);
579
        page_table_unlock(AS, true);
576
        page_table_unlock(AS, true);
580
    } else {
577
    } else {
581
        page_table_unlock(AS, true);
578
        page_table_unlock(AS, true);
582
        if (try_memmap_io_insertion(va,istate)) return;
579
        if (try_memmap_io_insertion(va, istate))
-
 
580
            return;
583
        /*
581
        /*
584
         * Forward the page fault to the address space page fault handler.
582
         * Forward the page fault to the address space page fault
-
 
583
         * handler.
585
         */
584
         */
586
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
585
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
587
            fault_if_from_uspace(istate,"Page fault at %p",va);
586
            fault_if_from_uspace(istate,"Page fault at %p",va);
588
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
587
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
-
 
588
                istate->cr_iip);
589
        }
589
        }
590
    }
590
    }
591
}
591
}
592
 
592
 
593
/** Data nested TLB fault handler.
593
/** Data nested TLB fault handler.
594
 *
594
 *
595
 * This fault should not occur.
595
 * This fault should not occur.
596
 *
596
 *
597
 * @param vector Interruption vector.
597
 * @param vector    Interruption vector.
598
 * @param istate Structure with saved interruption state.
598
 * @param istate    Structure with saved interruption state.
599
 */
599
 */
600
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
600
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
601
{
601
{
602
    panic("%s\n", __func__);
602
    panic("%s\n", __func__);
603
}
603
}
604
 
604
 
605
/** Data Dirty bit fault handler.
605
/** Data Dirty bit fault handler.
606
 *
606
 *
607
 * @param vector Interruption vector.
607
 * @param vector    Interruption vector.
608
 * @param istate Structure with saved interruption state.
608
 * @param istate    Structure with saved interruption state.
609
 */
609
 */
610
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
610
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
611
{
611
{
612
    region_register rr;
612
    region_register rr;
613
    rid_t rid;
613
    rid_t rid;
614
    uintptr_t va;
614
    uintptr_t va;
615
    pte_t *t;
615
    pte_t *t;
616
   
616
   
617
    va = istate->cr_ifa;    /* faulting address */
617
    va = istate->cr_ifa;    /* faulting address */
618
    rr.word = rr_read(VA2VRN(va));
618
    rr.word = rr_read(VA2VRN(va));
619
    rid = rr.map.rid;
619
    rid = rr.map.rid;
620
 
620
 
621
    page_table_lock(AS, true);
621
    page_table_lock(AS, true);
622
    t = page_mapping_find(AS, va);
622
    t = page_mapping_find(AS, va);
623
    ASSERT(t && t->p);
623
    ASSERT(t && t->p);
624
    if (t && t->p && t->w) {
624
    if (t && t->p && t->w) {
625
        /*
625
        /*
626
         * Update the Dirty bit in page tables and reinsert
626
         * Update the Dirty bit in page tables and reinsert
627
         * the mapping into DTC.
627
         * the mapping into DTC.
628
         */
628
         */
629
        t->d = true;
629
        t->d = true;
630
        dtc_pte_copy(t);
630
        dtc_pte_copy(t);
631
    } else {
631
    } else {
632
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
632
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
633
            fault_if_from_uspace(istate,"Page fault at %p",va);
633
            fault_if_from_uspace(istate,"Page fault at %p",va);
634
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
634
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
635
            t->d = true;
-
 
636
            dtc_pte_copy(t);
635
                istate->cr_iip);
637
        }
636
        }
638
    }
637
    }
639
    page_table_unlock(AS, true);
638
    page_table_unlock(AS, true);
640
}
639
}
641
 
640
 
642
/** Instruction access bit fault handler.
641
/** Instruction access bit fault handler.
643
 *
642
 *
644
 * @param vector Interruption vector.
643
 * @param vector    Interruption vector.
645
 * @param istate Structure with saved interruption state.
644
 * @param istate    Structure with saved interruption state.
646
 */
645
 */
647
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
646
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
648
{
647
{
649
    region_register rr;
648
    region_register rr;
650
    rid_t rid;
649
    rid_t rid;
651
    uintptr_t va;
650
    uintptr_t va;
652
    pte_t *t;  
651
    pte_t *t;  
653
 
652
 
654
    va = istate->cr_ifa;    /* faulting address */
653
    va = istate->cr_ifa;    /* faulting address */
655
    rr.word = rr_read(VA2VRN(va));
654
    rr.word = rr_read(VA2VRN(va));
656
    rid = rr.map.rid;
655
    rid = rr.map.rid;
657
 
656
 
658
    page_table_lock(AS, true);
657
    page_table_lock(AS, true);
659
    t = page_mapping_find(AS, va);
658
    t = page_mapping_find(AS, va);
660
    ASSERT(t && t->p);
659
    ASSERT(t && t->p);
661
    if (t && t->p && t->x) {
660
    if (t && t->p && t->x) {
662
        /*
661
        /*
663
         * Update the Accessed bit in page tables and reinsert
662
         * Update the Accessed bit in page tables and reinsert
664
         * the mapping into ITC.
663
         * the mapping into ITC.
665
         */
664
         */
666
        t->a = true;
665
        t->a = true;
667
        itc_pte_copy(t);
666
        itc_pte_copy(t);
668
    } else {
667
    } else {
669
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
668
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
670
            fault_if_from_uspace(istate,"Page fault at %p",va);
669
            fault_if_from_uspace(istate, "Page fault at %p", va);
671
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
670
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
672
            t->a = true;
-
 
673
            itc_pte_copy(t);
671
                istate->cr_iip);
674
        }
672
        }
675
    }
673
    }
676
    page_table_unlock(AS, true);
674
    page_table_unlock(AS, true);
677
}
675
}
678
 
676
 
679
/** Data access bit fault handler.
677
/** Data access bit fault handler.
680
 *
678
 *
681
 * @param vector Interruption vector.
679
 * @param vector Interruption vector.
682
 * @param istate Structure with saved interruption state.
680
 * @param istate Structure with saved interruption state.
683
 */
681
 */
684
void data_access_bit_fault(uint64_t vector, istate_t *istate)
682
void data_access_bit_fault(uint64_t vector, istate_t *istate)
685
{
683
{
686
    region_register rr;
684
    region_register rr;
687
    rid_t rid;
685
    rid_t rid;
688
    uintptr_t va;
686
    uintptr_t va;
689
    pte_t *t;
687
    pte_t *t;
690
 
688
 
691
    va = istate->cr_ifa;    /* faulting address */
689
    va = istate->cr_ifa;    /* faulting address */
692
    rr.word = rr_read(VA2VRN(va));
690
    rr.word = rr_read(VA2VRN(va));
693
    rid = rr.map.rid;
691
    rid = rr.map.rid;
694
 
692
 
695
    page_table_lock(AS, true);
693
    page_table_lock(AS, true);
696
    t = page_mapping_find(AS, va);
694
    t = page_mapping_find(AS, va);
697
    ASSERT(t && t->p);
695
    ASSERT(t && t->p);
698
    if (t && t->p) {
696
    if (t && t->p) {
699
        /*
697
        /*
700
         * Update the Accessed bit in page tables and reinsert
698
         * Update the Accessed bit in page tables and reinsert
701
         * the mapping into DTC.
699
         * the mapping into DTC.
702
         */
700
         */
703
        t->a = true;
701
        t->a = true;
704
        dtc_pte_copy(t);
702
        dtc_pte_copy(t);
705
    } else {
703
    } else {
706
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
704
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
707
            fault_if_from_uspace(istate,"Page fault at %p",va);
705
            fault_if_from_uspace(istate, "Page fault at %p", va);
708
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
706
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
709
            t->a = true;
-
 
710
            itc_pte_copy(t);
707
                istate->cr_iip);
711
        }
708
        }
712
    }
709
    }
713
    page_table_unlock(AS, true);
710
    page_table_unlock(AS, true);
714
}
711
}
715
 
712
 
716
/** Page not present fault handler.
713
/** Page not present fault handler.
717
 *
714
 *
718
 * @param vector Interruption vector.
715
 * @param vector Interruption vector.
719
 * @param istate Structure with saved interruption state.
716
 * @param istate Structure with saved interruption state.
720
 */
717
 */
721
void page_not_present(uint64_t vector, istate_t *istate)
718
void page_not_present(uint64_t vector, istate_t *istate)
722
{
719
{
723
    region_register rr;
720
    region_register rr;
724
    rid_t rid;
721
    rid_t rid;
725
    uintptr_t va;
722
    uintptr_t va;
726
    pte_t *t;
723
    pte_t *t;
727
   
724
   
728
    va = istate->cr_ifa;    /* faulting address */
725
    va = istate->cr_ifa;    /* faulting address */
729
    rr.word = rr_read(VA2VRN(va));
726
    rr.word = rr_read(VA2VRN(va));
730
    rid = rr.map.rid;
727
    rid = rr.map.rid;
731
 
728
 
732
    page_table_lock(AS, true);
729
    page_table_lock(AS, true);
733
    t = page_mapping_find(AS, va);
730
    t = page_mapping_find(AS, va);
734
    ASSERT(t);
731
    ASSERT(t);
735
   
732
   
736
    if (t->p) {
733
    if (t->p) {
737
        /*
734
        /*
738
         * If the Present bit is set in page hash table, just copy it
735
         * If the Present bit is set in page hash table, just copy it
739
         * and update ITC/DTC.
736
         * and update ITC/DTC.
740
         */
737
         */
741
        if (t->x)
738
        if (t->x)
742
            itc_pte_copy(t);
739
            itc_pte_copy(t);
743
        else
740
        else
744
            dtc_pte_copy(t);
741
            dtc_pte_copy(t);
745
        page_table_unlock(AS, true);
742
        page_table_unlock(AS, true);
746
    } else {
743
    } else {
747
        page_table_unlock(AS, true);
744
        page_table_unlock(AS, true);
748
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
745
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
749
            fault_if_from_uspace(istate,"Page fault at %p",va);
746
            fault_if_from_uspace(istate, "Page fault at %p", va);
750
            panic("%s: va=%p, rid=%d\n", __func__, va, rid);
747
            panic("%s: va=%p, rid=%d\n", __func__, va, rid);
751
        }
748
        }
752
    }
749
    }
753
}
750
}
754
 
751
 
755
/** @}
752
/** @}
756
 */
753
 */
757
 
754