/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
#include <arch.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	__address adr;
	__u32 count1, count2, stride1, stride2;
	int i, j;
	
	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();
	
	ipl = interrupts_disable();

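	/*
	 * The PAL_PTCE_INFO values read above describe the purge loop the
	 * processor requires: count1 x count2 ptc.e operations, advancing
	 * the purge address by stride2 in the inner loop and by stride1
	 * in the outer loop.
	 */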
	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			__asm__ volatile (
				"ptc.e %0 ;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}
	
	interrupts_restore(ipl);
	
	srlz_d();
	srlz_i();
}

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
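	/*
	 * Coarse implementation so far: the whole TLB is purged, which also
	 * invalidates entries belonging to other address spaces.
	 */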
	tlb_invalidate_all();
}

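/** Invalidate TLB entries for a range of pages belonging to one address space.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entry is to be invalidated.
 * @param cnt Number of pages.
 */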
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
	region_register rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;
	__u64 ps;
	__address va = page;
	
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;
		
		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

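	/*
	 * b becomes roughly log4(cnt). The switch below uses it to pick the
	 * purge page size ps (in bits) so that the whole range is covered
	 * by a small number of ptc.l operations; in the non-trivial cases
	 * va is also aligned down to a ps-sized boundary.
	 */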
	while (c >>= 1)
		b++;
	b >>= 1;
	
	switch (b) {
		case 0: /* cnt 1 - 3 */
			ps = PAGE_WIDTH;
			break;
		case 1: /* cnt 4 - 15 */
			/* cnt = ((cnt - 1) / 4) + 1; */
			ps = PAGE_WIDTH + 2;
			va &= ~((1 << ps) - 1);
			break;
		case 2: /* cnt 16 - 63 */
			/* cnt = ((cnt - 1) / 16) + 1; */
			ps = PAGE_WIDTH + 4;
			va &= ~((1 << ps) - 1);
			break;
		case 3: /* cnt 64 - 255 */
			/* cnt = ((cnt - 1) / 64) + 1; */
			ps = PAGE_WIDTH + 6;
			va &= ~((1 << ps) - 1);
			break;
		case 4: /* cnt 256 - 1023 */
			/* cnt = ((cnt - 1) / 256) + 1; */
			ps = PAGE_WIDTH + 8;
			va &= ~((1 << ps) - 1);
			break;
		case 5: /* cnt 1024 - 4095 */
			/* cnt = ((cnt - 1) / 1024) + 1; */
			ps = PAGE_WIDTH + 10;
			va &= ~((1 << ps) - 1);
			break;
		case 6: /* cnt 4096 - 16383 */
			/* cnt = ((cnt - 1) / 4096) + 1; */
			ps = PAGE_WIDTH + 12;
			va &= ~((1 << ps) - 1);
			break;
		case 7: /* cnt 16384 - 65535 */
		case 8: /* cnt 65536 - (256K - 1) */
			/* cnt = ((cnt - 1) / 16384) + 1; */
			ps = PAGE_WIDTH + 14;
			va &= ~((1 << ps) - 1);
			break;
		default:
			/* cnt = ((cnt - 1) / (16384 * 16)) + 1; */
			ps = PAGE_WIDTH + 18;
			va &= ~((1 << ps) - 1);
			break;
	}
	/* cnt += (page != va); */
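	/*
	 * Purge the range with one ptc.l per ps-sized block. The page size
	 * is passed in bits 7:2 of the second operand, hence ps << 2.
	 */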
	for (; va < (page + cnt * PAGE_SIZE); va += (1 << ps)) {
		__asm__ volatile (
			"ptc.l %0,%1;;"
			:
			: "r" (va), "r" (ps << 2)
		);
	}
	srlz_d();
	srlz_i();
	
	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into the data translation cache; otherwise insert into the instruction translation cache.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;
	
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;
		
		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

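	/*
	 * Interruption collection is disabled (rsm of PSR_IC_MASK) while
	 * cr.ifa and cr.itir are programmed and the itc insertion executes;
	 * the saved PSR is written back via psr.l afterwards.
	 */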
	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);
	
	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into the data translation register; otherwise insert into the instruction translation register.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;
	
	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;
		
		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

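	/*
	 * As in tc_mapping_insert(), interruption collection is disabled
	 * around programming cr.ifa/cr.itir and the itr.i/itr.d insertion
	 * into translation register slot tr.
	 */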
	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);
	
	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert a kernel mapping into DTLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
	tlb_entry_t entry;
	
	entry.word[0] = 0;
	entry.word[1] = 0;
	
	entry.p = true;			/* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;			/* already accessed */
	entry.d = true;			/* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

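	/*
	 * Pin the mapping in a translation register if dtr is set;
	 * otherwise only seed the data translation cache.
	 */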
	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;
	
	entry.word[0] = 0;
	entry.word[1] = 0;
	
	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;
	
	dtc_mapping_insert(t->page, t->as->asid, entry);
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;
	
	entry.word[0] = 0;
	entry.word[1] = 0;
	
	ASSERT(t->x);
	
	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;
	
	itc_mapping_insert(t->page, t->as->asid, entry);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	__address va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, istate->cr_ifa, rr.map.rid);
		}
	}
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for faulting piece of
			 * kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d, iip=%P\n", __FUNCTION__, va, rid, istate->cr_iip);
		}
	}
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
{
	panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
{
	pte_t *t;
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, istate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	}
	page_table_unlock(AS, true);
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
{
	pte_t *t;
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, istate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	}
	page_table_unlock(AS, true);
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, istate_t *istate)
{
	pte_t *t;
	
	page_table_lock(AS, true);
	t = page_mapping_find(AS, istate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	}
	page_table_unlock(AS, true);
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, istate_t *istate)
{
	region_register rr;
	__address va;
	pte_t *t;
	
	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t);
	
	if (t->p) {
		/*
		 * If the Present bit is set in page hash table, just copy it
		 * and update ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		page_table_unlock(AS, true);
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, va, rr.map.rid);
		}
	}
}