/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <typedefs.h>
#include <panic.h>
#include <arch.h>
 
/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
    /* TODO */
}
 
/** Invalidate entries belonging to an address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    /* TODO */
}
 
/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, true);
}
 
/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
    tc_mapping_insert(va, asid, entry, false);
}
 
/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
        "(p6) itc.i %3;;\n"
        "(p7) itc.d %3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
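
/*
 * Usage sketch: callers zero both words of a tlb_entry_t, fill in the p,
 * ma, a, d, pl, ar, ppn and ps fields as appropriate and pass the entry
 * to dtc_mapping_insert() or itc_mapping_insert() together with the
 * virtual page address and the ASID of the owning address space; see
 * dtc_pte_copy() and dtlb_kernel_mapping_insert() below for concrete
 * examples. PSR.ic is cleared around the insertion above because the
 * itc instructions take the virtual address and page size from cr.ifa
 * and cr.itir, which are only safe to program with interruption
 * collection disabled.
 */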
 
/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, false, tr);
}
 
/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
    tr_mapping_insert(va, asid, entry, true, tr);
}
 
/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
    region_register rr;
    bool restore_rr = false;

    rr.word = rr_read(VA2VRN(va));
    if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
        /*
         * The selected region register does not contain required RID.
         * Save the old content of the register and replace the RID.
         */
        region_register rr0;

        rr0 = rr;
        rr0.map.rid = ASID2RID(asid, VA2VRN(va));
        rr_write(VA2VRN(va), rr0.word);
        srlz_d();
        srlz_i();
    }

    __asm__ volatile (
        "mov r8=psr;;\n"
        "rsm %0;;\n"                /* PSR_IC_MASK */
        "srlz.d;;\n"
        "srlz.i;;\n"
        "mov cr.ifa=%1\n"           /* va */
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
        "(p6) itr.i itr[%4]=%3;;\n"
        "(p7) itr.d dtr[%4]=%3;;\n"
        "mov psr.l=r8;;\n"
        "srlz.d;;\n"
        :
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
        : "p6", "p7", "r8"
    );

    if (restore_rr) {
        rr_write(VA2VRN(va), rr.word);
        srlz_d();
        srlz_i();
    }
}
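
/*
 * Note: translation registers (itr/dtr) hold pinned translations that the
 * hardware never replaces on its own, whereas translation cache entries
 * inserted via itc may be evicted at any time; this is why both the
 * tr_mapping_insert() and tc_mapping_insert() paths exist.
 */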
 
/** Insert data into DTLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = true;         /* present */
    entry.ma = MA_WRITEBACK;
    entry.a = true;         /* already accessed */
    entry.d = true;         /* already dirty */
    entry.pl = PL_KERNEL;
    entry.ar = AR_READ | AR_WRITE;
    entry.ppn = frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    if (dtr)
        dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
    else
        dtc_mapping_insert(page, ASID_KERNEL, entry);
}
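
/*
 * Example (as used by alternate_data_tlb_fault() below): a fault on a
 * kernel-space address can be resolved by inserting an identity mapping
 * through the data translation cache:
 *
 *     dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
 */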
 
/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.d = t->d;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->w ? AR_WRITE : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    dtc_mapping_insert(t->page, t->as->asid, entry);
}
 
/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
    tlb_entry_t entry;

    entry.word[0] = 0;
    entry.word[1] = 0;

    ASSERT(t->x);

    entry.p = t->p;
    entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
    entry.a = t->a;
    entry.pl = t->k ? PL_KERNEL : PL_USER;
    entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
    entry.ppn = t->frame >> PPN_SHIFT;
    entry.ps = PAGE_WIDTH;

    itc_mapping_insert(t->page, t->as->asid, entry);
}
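
/*
 * The fault handlers below share a common pattern: look the faulting
 * address up in the software page hash table via page_mapping_find()
 * and, when a present PTE is found, reinsert it into the ITC or DTC
 * using itc_pte_copy() or dtc_pte_copy(); the alternate TLB fault
 * handlers and page_not_present() forward unresolved faults to
 * as_page_fault().
 */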
 
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    __address va;
    pte_t *t;

    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in software page hash table.
         * Insert it into instruction translation cache.
         */
        itc_pte_copy(t);
    } else {
        /*
         * Forward the page fault to address space page fault handler.
         */
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}
 
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    rid_t rid;
    __address va;
    pte_t *t;

    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    rid = rr.map.rid;
    if (RID2ASID(rid) == ASID_KERNEL) {
        if (VA2VRN(va) == VRN_KERNEL) {
            /*
             * Provide KA2PA(identity) mapping for faulting piece of
             * kernel address space.
             */
            dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
            return;
        }
    }

    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in software page hash table.
         * Insert it into data translation cache.
         */
        dtc_pte_copy(t);
    } else {
        /*
         * Forward the page fault to address space page fault handler.
         */
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}
 
/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, struct exception_regdump *pstate)
{
    panic("%s\n", __FUNCTION__);
}
 
/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Dirty bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->d = true;
        dtc_pte_copy(t);
    }
}
 
/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into ITC.
         */
        t->a = true;
        itc_pte_copy(t);
    }
}
 
/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, struct exception_regdump *pstate)
{
    pte_t *t;

    t = page_mapping_find(AS, pstate->cr_ifa);
    ASSERT(t && t->p);
    if (t && t->p) {
        /*
         * Update the Accessed bit in page tables and reinsert
         * the mapping into DTC.
         */
        t->a = true;
        dtc_pte_copy(t);
    }
}
 
/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param pstate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, struct exception_regdump *pstate)
{
    region_register rr;
    __address va;
    pte_t *t;

    va = pstate->cr_ifa;    /* faulting address */
    rr.word = rr_read(VA2VRN(va));
    t = page_mapping_find(AS, va);
    ASSERT(t);

    if (t->p) {
        /*
         * If the Present bit is set in page hash table, just copy it
         * and update ITC/DTC.
         */
        if (t->x)
            itc_pte_copy(t);
        else
            dtc_pte_copy(t);
    } else {
        if (!as_page_fault(va)) {
            panic("%s: va=%P, rid=%d\n", __FUNCTION__, pstate->cr_ifa, rr.map.rid);
        }
    }
}