Subversion Repositories HelenOS

Rev 2009 → Rev 2048
Line 55... Line 55...
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str);

char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
Line 88... Line 91...
 * @param frame Physical frame address.
 * @param pagesize Page size.
 * @param locked True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;
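For illustration only (this call does not appear in the revisions above): with the signature shown, pinning one locked, cacheable 8 KiB mapping could look like the sketch below, assuming a PAGESIZE_8K page-size constant (not shown in this diff) and made-up addresses.

    /* Hypothetical call site, not part of this file: lock a cacheable 8 KiB
     * identity mapping of physical frame 0x4000000 in the DTLB. */
    dtlb_insert_mapping(0x4000000, 0x4000000, PAGESIZE_8K, true, true);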
Line 122... Line 126...
}

/** Copy PTE to TLB.
 *
 * @param t Page Table Entry to be copied.
 * @param ro If true, the entry will be created read-only, regardless of its
 *     w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
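The register values handled in this file (the tag and data locals above, and the sfsr, t and d values further down) are loaded through a raw .value member and then examined field by field. A minimal, self-contained sketch of that union-with-bitfields idiom follows; the field names and widths are made up for illustration and are not the real UltraSPARC layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for register types such as tlb_tag_access_reg_t:
 * a raw 64-bit value overlaid with named bitfields (widths are made up). */
typedef union {
    uint64_t value;
    struct {
        uint64_t context : 13;   /* address space identifier */
        uint64_t vpn : 51;       /* virtual page number */
    };
} demo_tag_t;

int main(void)
{
    demo_tag_t tag;

    tag.value = 0;               /* in the kernel this would be a register read */
    tag.vpn = 0x12345;
    tag.context = 9;
    printf("raw=%#llx vpn=%#llx context=%llu\n",
        (unsigned long long) tag.value,
        (unsigned long long) tag.vpn,
        (unsigned long long) tag.context);
    return 0;
}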
Line 210... Line 215...
        itsb_pte_copy(t);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __FUNCTION__);
        }
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 */
void fast_data_access_mmu_miss(int n, istate_t *istate)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
Line 237... Line 243...
    va = tag.vpn << PAGE_WIDTH;

    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __FUNCTION__);
        }
        do_fast_data_access_mmu_miss_fault(istate, tag,
            "Unexpected kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
Line 261... Line 269...
        /*
         * Forward the page fault to the address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, tag,
                __FUNCTION__);
        }
    }
}

/** DTLB protection fault handler. */
Line 280... Line 289...

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
        dtlb_pte_copy(t, false);
Line 293... Line 303...
        dtsb_pte_copy(t, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, tag,
                __FUNCTION__);
        }
    }
}

/** Print contents of both TLBs. */
Line 314... Line 326...
    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);

        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
            "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
            "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
            t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
            d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);

        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
            "ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
            "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
            t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
            d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    }

}

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << PAGE_WIDTH;

    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
        tag.context);
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    panic("%s\n", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << PAGE_WIDTH;

    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
        tag.context);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    dump_istate(istate);
    panic("%s\n", str);
}

Line 368... Line 391...
    uintptr_t sfar;

    sfsr.value = dtlb_sfsr_read();
    sfar = dtlb_sfar_read();

    printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
        "fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
        sfsr.ow, sfsr.fv);
    printf("DTLB SFAR: address=%p\n", sfar);

    dtlb_sfsr_write(0);
}

Line 404... Line 428...
        }
    }

}

/** Invalidate all ITLB and DTLB entries that belong to the specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
Line 427... Line 452...
    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for the specified page range in the
 * specified address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out of the ITLB and DTLB.
 * @param cnt Number of ITLB and DTLB entries to invalidate.
 */
Line 446... Line 472...
    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
    nucleus_leave();