/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <arch.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	__address adr;
	__u32 count1, count2, stride1, stride2;
	int i, j;

	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			__asm__ volatile (
				"ptc.e %0;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_enable();

	srlz_d();
	srlz_i();
}
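
/*
 * Illustrative sketch (not part of the original file): tlb_invalidate_all()
 * is the heavyweight fallback used when no finer-grained purge is available,
 * for example on ASID exhaustion. The helper name below is hypothetical and
 * only demonstrates the call.
 */
static void example_purge_all_translations(void)
{
	/* Walk the PAL-described PTCE grid and drop every cached translation. */
	tlb_invalidate_all();
}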

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	/* TODO */
}

/** Insert data into the data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into the instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into the instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param dtc If true, insert into the data translation cache; otherwise use the instruction translation cache.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain the required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
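
/*
 * Illustrative sketch (not part of the original file): a caller builds the
 * insertion-format entry by hand and passes it to dtc_mapping_insert(),
 * analogous to what dtc_pte_copy() below does for PTEs. The function name
 * and the uncacheable device-register use case are hypothetical.
 */
static void example_map_device_registers(__address page, __address frame)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = true;			/* present */
	entry.ma = MA_UNCACHEABLE;	/* device memory must not be cached */
	entry.a = true;			/* already accessed */
	entry.d = true;			/* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	dtc_mapping_insert(page, ASID_KERNEL, entry);
}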

/** Insert data into an instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into a data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into an instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of the TLB entry as required by the TLB insertion format.
 * @param dtr If true, insert into a data translation register; otherwise use an instruction translation register.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain the required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
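
/*
 * Illustrative sketch (not part of the original file): pinning an executable
 * kernel page in instruction translation register slot 0 so that hardware
 * never evicts it from the ITC. The function name and the choice of slot are
 * hypothetical.
 */
static void example_pin_kernel_text(__address page, __address frame)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = true;			/* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;			/* already accessed */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_EXECUTE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	itr_mapping_insert(page, ASID_KERNEL, entry, 0);
}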

/** Insert a kernel mapping into the DTLB.
 *
 * @param page Virtual page address of the mapping.
 * @param frame Physical frame address the page maps to.
 * @param dtr If true, insert into a data translation register; otherwise use the data translation cache.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = true;			/* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;			/* already accessed */
	entry.d = true;			/* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}
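
/*
 * Illustrative usage (not part of the original file): map one page of the
 * kernel address space identity-style, the same way the alternate data TLB
 * fault handler below does. The second call, pinning the mapping in data
 * translation register 1 instead, is a hypothetical variant.
 */
static void example_map_kernel_page(__address va)
{
	/* Transient entry in the data translation cache. */
	dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);

	/* Or keep it pinned in DTR slot 1: */
	/* dtlb_kernel_mapping_insert(va, KA2PA(va), true, 1); */
}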

/** Copy the content of a PTE into the data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	dtc_mapping_insert(t->page, t->as->asid, entry);
}

/** Copy the content of a PTE into the instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	ASSERT(t->x);

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	itc_mapping_insert(t->page, t->as->asid, entry);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into the instruction translation cache.
		 */
		itc_pte_copy(t);
	} else {
		/*
		 * Forward the page fault to the address space page fault handler.
		 */
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide a KA2PA (identity) mapping for the faulting
			 * piece of the kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}

	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into the data translation cache.
		 */
		dtc_pte_copy(t);
	} else {
		/*
		 * Forward the page fault to the address space page fault handler.
		 */
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
	panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	}
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	}
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
	pte_t *t;

	t = page_mapping_find(AS, pstate->cr_ifa);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	}
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, struct exception_regdump *pstate)
{
	region_register rr;
	__address va;
	pte_t *t;

	va = pstate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	t = page_mapping_find(AS, va);
	ASSERT(t);

	if (t->p) {
		/*
		 * If the Present bit is set in the page hash table, just copy
		 * the mapping and update the ITC/DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
	} else {
		if (!as_page_fault(va)) {
			panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
		}
	}
}