/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <typedefs.h>
#include <panic.h>
#include <arch.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    /* TODO */
}

/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    /* TODO */
}

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
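
/*
 * Illustrative sketch only (not referenced elsewhere in the kernel; the
 * function name is hypothetical): build a TLB entry for a present,
 * write-back cacheable, read-write kernel page and insert it into the data
 * translation cache. The field assignments mirror what
 * dtlb_kernel_mapping_insert() does further down in this file.
 */
static void example_dtc_insert(__address page, __address frame, asid_t asid)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;                 /* present */
    entry.ma = MA_WRITEBACK;        /* write-back cacheable memory */
    entry.a = true;                 /* already accessed */
    entry.d = true;                 /* already dirty */
    entry.pl = PL_KERNEL;           /* kernel privilege level */
    entry.ar = AR_READ | AR_WRITE;  /* access rights */
    entry.ppn = frame >> PPN_SHIFT; /* physical page number */
    entry.ps = PAGE_WIDTH;          /* page size */

    dtc_mapping_insert(page, asid, entry);
}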

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}

/** Insert kernel mapping into DTLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;         /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;         /* already accessed */
    entry.d = true;         /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}
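
/*
 * Usage sketch (an assumption, except for the first call, which appears in
 * alternate_data_tlb_fault() below): identity-map the kernel page holding va
 * either through the data translation cache or, when the translation must
 * stay pinned and never be evicted, through a data translation register.
 * The register index 0 in the second call is an arbitrary illustrative
 * choice.
 *
 *     dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);   (insert via DTC, tr ignored)
 *     dtlb_kernel_mapping_insert(va, KA2PA(va), true, 0);    (pin in DTR 0)
 */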

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    dtc_mapping_insert(t->page, t->as->asid, entry);
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    ASSERT(t->x);

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    itc_mapping_insert(t->page, t->as->asid, entry);
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    __address va;
    pte_t *t;

    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));  /* needed for the RID reported by panic() */
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the instruction translation cache.
         */
        itc_pte_copy(t);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;

    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;
    if (RID2ASID(rid) == ASID_KERNEL) {
        if (VA2VRN(va) == VRN_KERNEL) {
            /*
             * Provide KA2PA(identity) mapping for the faulting piece of
             * the kernel address space.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        }
    }

    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into the data translation cache.
         */
        dtc_pte_copy(t);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Dirty bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->d = true;
        dtc_pte_copy(t);
    }
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into ITC.
         */
        t->a = true;
        itc_pte_copy(t);
    }
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->a = true;
        dtc_pte_copy(t);
    }
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    __address va;
    pte_t *t;

    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));  /* needed for the RID reported by panic() */
    t = page_mapping_find(AS, va);
    ASSERT(t);

    if (t->p) {
        /*
         * If the Present bit is set in the page hash table, just copy it
         * and update ITC/DTC.
         */
        if (t->x)
            itc_pte_copy(t);
        else
            dtc_pte_copy(t);
    } else {
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}