/*
 * Copyright (C) 2003-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/mm/tlb.h>
#include <mm/asid.h>
#include <mm/tlb.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/cp0.h>
#include <panic.h>
#include <arch.h>
#include <symtab.h>
#include <synch/spinlock.h>
#include <print.h>
#include <debug.h>
#include <align.h>

static void tlb_refill_fail(istate_t *istate);
static void tlb_invalid_fail(istate_t *istate);
static void tlb_modified_fail(istate_t *istate);

static pte_t *find_mapping_and_check(__address badvaddr);

static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn);
static void prepare_entry_hi(entry_hi_t *hi, asid_t asid, __address addr);

/** Initialize TLB
 *
 * Initialize TLB.
 * Invalidate all entries and mark wired entries.
 */
void tlb_arch_init(void)
{
    int i;

    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    cp0_entry_hi_write(0);
    cp0_entry_lo0_write(0);
    cp0_entry_lo1_write(0);

    /* Clear and initialize TLB. */

    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbwi();
    }

    /*
     * The kernel is going to make use of some wired
     * entries (e.g. mapping kernel stacks in kseg3).
     */
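    /*
     * Note: entries with index below the Wired register are never selected
     * for random replacement by tlbwr, so the wired mappings stay put.
     */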
    cp0_wired_write(TLB_WIRED);
}

/** Process TLB Refill Exception
 *
 * Process TLB Refill Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_refill(istate_t *istate)
{
    entry_lo_t lo;
    entry_hi_t hi;
    asid_t asid;
    __address badvaddr;
    pte_t *pte;

    badvaddr = cp0_badvaddr_read();

    spinlock_lock(&AS->lock);
    asid = AS->asid;
    spinlock_unlock(&AS->lock);

    page_table_lock(AS, true);

    pte = find_mapping_and_check(badvaddr);
    if (!pte)
        goto fail;

    /*
     * Record access to PTE.
     */
    pte->a = 1;

    prepare_entry_hi(&hi, asid, badvaddr);
    prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn);

    /*
     * New entry is to be inserted into TLB
     */
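    /*
     * Each TLB entry maps an even/odd pair of virtual pages: EntryLo0
     * describes the even page and EntryLo1 the odd one, so only the half
     * of the pair that corresponds to badvaddr is filled in below.
     */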
    cp0_entry_hi_write(hi.value);
    if ((badvaddr/PAGE_SIZE) % 2 == 0) {
        cp0_entry_lo0_write(lo.value);
        cp0_entry_lo1_write(0);
    }
    else {
        cp0_entry_lo0_write(0);
        cp0_entry_lo1_write(lo.value);
    }
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    tlbwr();

    page_table_unlock(AS, true);
    return;

fail:
    page_table_unlock(AS, true);
    tlb_refill_fail(istate);
}

/** Process TLB Invalid Exception
 *
 * Process TLB Invalid Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_invalid(istate_t *istate)
{
    tlb_index_t index;
    __address badvaddr;
    entry_lo_t lo;
    entry_hi_t hi;
    pte_t *pte;

    badvaddr = cp0_badvaddr_read();

    /*
     * Locate the faulting entry in TLB.
     */
    hi.value = cp0_entry_hi_read();
    prepare_entry_hi(&hi, hi.asid, badvaddr);
    cp0_entry_hi_write(hi.value);
    tlbp();
    index.value = cp0_index_read();

    page_table_lock(AS, true);

    /*
     * Fail if the entry is not in TLB.
     */
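    /* Index.P is set by the tlbp probe above when no matching entry was found. */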
    if (index.p) {
        printf("TLB entry not found.\n");
        goto fail;
    }

    pte = find_mapping_and_check(badvaddr);
    if (!pte)
        goto fail;

    /*
     * Read the faulting TLB entry.
     */
    tlbr();

    /*
     * Record access to PTE.
     */
    pte->a = 1;

    prepare_entry_lo(&lo, pte->g, pte->p, pte->d, pte->cacheable, pte->pfn);

    /*
     * The entry is to be updated in TLB.
     */
    if ((badvaddr/PAGE_SIZE) % 2 == 0)
        cp0_entry_lo0_write(lo.value);
    else
        cp0_entry_lo1_write(lo.value);
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    tlbwi();

    page_table_unlock(AS, true);
    return;

fail:
    page_table_unlock(AS, true);
    tlb_invalid_fail(istate);
}

/** Process TLB Modified Exception
 *
 * Process TLB Modified Exception.
 *
 * @param istate Interrupted register context.
 */
void tlb_modified(istate_t *istate)
{
    tlb_index_t index;
    __address badvaddr;
    entry_lo_t lo;
    entry_hi_t hi;
    pte_t *pte;

    badvaddr = cp0_badvaddr_read();

    /*
     * Locate the faulting entry in TLB.
     */
    hi.value = cp0_entry_hi_read();
    prepare_entry_hi(&hi, hi.asid, badvaddr);
    cp0_entry_hi_write(hi.value);
    tlbp();
    index.value = cp0_index_read();

    page_table_lock(AS, true);

    /*
     * Fail if the entry is not in TLB.
     */
    if (index.p) {
        printf("TLB entry not found.\n");
        goto fail;
    }

    pte = find_mapping_and_check(badvaddr);
    if (!pte)
        goto fail;

    /*
     * Fail if the page is not writable.
     */
    if (!pte->w)
        goto fail;

    /*
     * Read the faulting TLB entry.
     */
    tlbr();

    /*
     * Record access and write to PTE.
     */
    pte->a = 1;
    pte->d = 1;

    prepare_entry_lo(&lo, pte->g, pte->p, pte->w, pte->cacheable, pte->pfn);

    /*
     * The entry is to be updated in TLB.
     */
    if ((badvaddr/PAGE_SIZE) % 2 == 0)
        cp0_entry_lo0_write(lo.value);
    else
        cp0_entry_lo1_write(lo.value);
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
    tlbwi();

    page_table_unlock(AS, true);
    return;

fail:
    page_table_unlock(AS, true);
    tlb_modified_fail(istate);
}

void tlb_refill_fail(istate_t *istate)
{
    char *symbol = "";
    char *sym2 = "";

    char *s = get_symtab_entry(istate->epc);
    if (s)
        symbol = s;
    s = get_symtab_entry(istate->ra);
    if (s)
        sym2 = s;
    panic("%X: TLB Refill Exception at %X(%s<-%s)\n", cp0_badvaddr_read(), istate->epc, symbol, sym2);
}

void tlb_invalid_fail(istate_t *istate)
{
    char *symbol = "";

    char *s = get_symtab_entry(istate->epc);
    if (s)
        symbol = s;
    panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(), istate->epc, symbol);
}

void tlb_modified_fail(istate_t *istate)
{
    char *symbol = "";

    char *s = get_symtab_entry(istate->epc);
    if (s)
        symbol = s;
    panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), istate->epc, symbol);
}

/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
 * The AS->lock must be held on entry to this function.
 *
 * @param badvaddr Faulting virtual address.
 *
 * @return PTE on success, NULL otherwise.
 */
pte_t *find_mapping_and_check(__address badvaddr)
{
    entry_hi_t hi;
    pte_t *pte;

    hi.value = cp0_entry_hi_read();

    /*
     * Handler cannot succeed if the ASIDs don't match.
     */
    if (hi.asid != AS->asid) {
        printf("EntryHi.asid=%d, AS->asid=%d\n", hi.asid, AS->asid);
        return NULL;
    }

    /*
     * Check if the mapping exists in page tables.
     */
    pte = page_mapping_find(AS, badvaddr);
    if (pte && pte->p) {
        /*
         * Mapping found in page tables.
         * Immediately succeed.
         */
        return pte;
    } else {
        /*
         * Mapping not found in page tables.
         * Resort to higher-level page fault handler.
         */
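        /*
         * The page table lock is released across the call to as_page_fault()
         * and reacquired on both return paths below.
         */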
        page_table_unlock(AS, true);
        if (as_page_fault(badvaddr)) {
            /*
             * The higher-level page fault handler succeeded and
             * the mapping ought to be in place.
             */
            page_table_lock(AS, true);
            pte = page_mapping_find(AS, badvaddr);
            ASSERT(pte && pte->p);
            return pte;
        } else {
            page_table_lock(AS, true);
            printf("Page fault.\n");
            return NULL;
        }
    }
}

void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn)
{
    lo->value = 0;
    lo->g = g;
    lo->v = v;
    lo->d = d;
    lo->c = cacheable ? PAGE_CACHEABLE_EXC_WRITE : PAGE_UNCACHED;
    lo->pfn = pfn;
}

void prepare_entry_hi(entry_hi_t *hi, asid_t asid, __address addr)
{
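    /*
     * EntryHi.VPN2 identifies a pair of virtual pages, hence the alignment
     * to a double-page boundary.
     */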
    hi->value = ALIGN_DOWN(addr, PAGE_SIZE * 2);
    hi->asid = asid;
}

/** Print contents of TLB. */
void tlb_print(void)
{
    page_mask_t mask;
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    int i;

    hi_save.value = cp0_entry_hi_read();

    printf("TLB:\n");
    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        mask.value = cp0_pagemask_read();
        hi.value = cp0_entry_hi_read();
        lo0.value = cp0_entry_lo0_read();
        lo1.value = cp0_entry_lo1_read();

        printf("%d: asid=%d, vpn2=%d, mask=%d\tg[0]=%d, v[0]=%d, d[0]=%d, c[0]=%hhd, pfn[0]=%d\n"
               "\t\t\t\tg[1]=%d, v[1]=%d, d[1]=%d, c[1]=%hhd, pfn[1]=%d\n",
               i, hi.asid, hi.vpn2, mask.mask, lo0.g, lo0.v, lo0.d, lo0.c, lo0.pfn,
               lo1.g, lo1.v, lo1.d, lo1.c, lo1.pfn);
    }

    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate all non-wired TLB entries. */
void tlb_invalidate_all(void)
{
    ipl_t ipl;
    entry_lo_t lo0, lo1;
    entry_hi_t hi_save;
    int i;

    hi_save.value = cp0_entry_hi_read();
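    /*
     * EntryHi (including the current ASID) is clobbered by tlbr below,
     * so its saved value is written back before returning.
     */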
    ipl = interrupts_disable();

    for (i = TLB_WIRED; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        lo0.value = cp0_entry_lo0_read();
        lo1.value = cp0_entry_lo1_read();

        lo0.v = 0;
        lo1.v = 0;

        cp0_entry_lo0_write(lo0.value);
        cp0_entry_lo1_write(lo1.value);

        tlbwi();
    }

    interrupts_restore(ipl);
    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate all TLB entries belonging to specified address space.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
    ipl_t ipl;
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    int i;

    ASSERT(asid != ASID_INVALID);

    hi_save.value = cp0_entry_hi_read();
    ipl = interrupts_disable();

    for (i = 0; i < TLB_ENTRY_COUNT; i++) {
        cp0_index_write(i);
        tlbr();

        hi.value = cp0_entry_hi_read();

        if (hi.asid == asid) {
            lo0.value = cp0_entry_lo0_read();
            lo1.value = cp0_entry_lo1_read();

            lo0.v = 0;
            lo1.v = 0;

            cp0_entry_lo0_write(lo0.value);
            cp0_entry_lo1_write(lo1.value);

            tlbwi();
        }
    }

    interrupts_restore(ipl);
    cp0_entry_hi_write(hi_save.value);
}

/** Invalidate TLB entries for specified page range belonging to specified address space.
 *
 * @param asid Address space identifier.
 * @param page First page whose TLB entry is to be invalidated.
 * @param cnt Number of entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
    int i;
    ipl_t ipl;
    entry_lo_t lo0, lo1;
    entry_hi_t hi, hi_save;
    tlb_index_t index;

    ASSERT(asid != ASID_INVALID);

    hi_save.value = cp0_entry_hi_read();
    ipl = interrupts_disable();
 
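    /*
     * Each TLB entry covers a pair of pages, so probing every second page
     * is sufficient; iterating up to cnt+1 ensures the pair containing the
     * last page is probed even when the range starts at an odd page.
     */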
    for (i = 0; i < cnt+1; i+=2) {
        hi.value = 0;
        prepare_entry_hi(&hi, asid, page + i * PAGE_SIZE);
        cp0_entry_hi_write(hi.value);

        tlbp();
        index.value = cp0_index_read();

        if (!index.p) {
            /* Entry was found, index register contains valid index. */
            tlbr();

            lo0.value = cp0_entry_lo0_read();
            lo1.value = cp0_entry_lo1_read();

            lo0.v = 0;
            lo1.v = 0;

            cp0_entry_lo0_write(lo0.value);
            cp0_entry_lo1_write(lo1.value);

            tlbwi();
        }
    }

    interrupts_restore(ipl);
    cp0_entry_hi_write(hi_save.value);
}