/*
 * Copyright (c) 2005 Jakub Jermar
 * Copyright (c) 2008 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/sun4v/hypercall.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/tte.h>
#include <arch/mm/tlb.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/cpu.h>
#include <arch/mm/pagesize.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void itlb_pte_copy(pte_t *);
static void dtlb_pte_copy(pte_t *, bool);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    uint64_t, const char *);

#if 0
char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};
#endif

/*
 * The assembly language routine passes a 64-bit parameter to the Data Access
 * MMU Miss and Data Access Protection handlers. The parameter encapsulates
 * both the virtual address of the faulting page and the faulting context:
 * the most significant 51 bits represent the VA of the faulting page and
 * the least significant 13 bits represent the faulting context. The
 * following macros extract the page and the context out of the 64-bit
 * parameter:
 */

/* extracts the VA of the faulting page */
#define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13)

/* extracts the faulting context */
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)
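
/*
 * Worked example (an illustration, not part of the original code): for
 * page_and_ctx == 0x40008005, DMISS_ADDRESS() clears the low 13 bits and
 * yields the page address 0x40008000, while DMISS_CONTEXT() masks off
 * everything else and yields context 5.
 */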

/*
 * Invalidate all non-locked DTLB and ITLB entries.
 */
void tlb_arch_init(void)
{
    tlb_invalidate_all();
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
#if 0
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.context = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_DCACHE */
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
#endif
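
    /*
     * The block above is the old sun4u register interface, left disabled
     * here. A sun4v implementation would presumably build a tte_data_t and
     * hand it to the hypervisor instead. An untested sketch, assuming the
     * MMU_MAP_ADDR fast trap and the tte_data_t fields used elsewhere in
     * this file (locked mappings would need a permanent-mapping API):
     */
#if 0
    tte_data_t data;

    data.value = 0;
    data.v = true;
    data.ra = frame >> FRAME_WIDTH;
    data.cp = cacheable;
    data.p = true;
    data.w = true;
    data.size = pagesize;

    __hypercall_hyperfast(page, ASID_KERNEL, data.value, MMU_FLAG_DTLB,
        0, MMU_MAP_ADDR);
#endif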
}

/** Copy PTE to DTLB.
 *
 * @param t  Page Table Entry to be copied.
 * @param ro If true, the entry will be created read-only, regardless of
 *           its w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
    tte_data_t data;

    data.value = 0;
    data.v = true;
    data.nfo = false;
    data.ra = (t->frame) >> FRAME_WIDTH;
    data.ie = false;
    data.e = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif
    data.p = t->k;
    data.x = false;
    data.w = ro ? false : t->w;
    data.size = PAGESIZE_8K;
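
    /*
     * Ask the hypervisor to install the mapping. Judging by the sun4v
     * hypervisor interface, the arguments are presumably the virtual
     * address, the context (ASID), the TTE data and a flag selecting
     * which TLB to map into; MMU_MAP_ADDR names the fast-trap function.
     */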
    __hypercall_hyperfast(
        t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR);
}

/** Copy PTE to ITLB.
 *
 * @param t Page Table Entry to be copied.
 */
void itlb_pte_copy(pte_t *t)
{
    tte_data_t data;

    data.value = 0;
    data.v = true;
    data.nfo = false;
    data.ra = (t->frame) >> FRAME_WIDTH;
    data.ie = false;
    data.e = false;
    data.cp = t->c;
    data.cv = false;
    data.p = t->k;
    data.x = true;
    data.w = false;
    data.size = PAGESIZE_8K;

    __hypercall_hyperfast(
        t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);

    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t);
#ifdef CONFIG_TSB
        //itsb_pte_copy(t, index);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __func__);
        }
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param page_and_ctx A 64-bit value describing the fault. The most
 *                     significant 51 bits of the value contain the virtual
 *                     address which caused the fault truncated to the page
 *                     boundary. The least significant 13 bits of the value
 *                     contain the number of the context in which the fault
 *                     occurred.
 * @param istate       Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
{
    pte_t *t;
    uintptr_t va = DMISS_ADDRESS(page_and_ctx);
    uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

    if (ctx == ASID_KERNEL) {
        if (va == 0) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
                __func__);
        }
        do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
            "Unexpected kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
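        /*
         * Insert the mapping read-only for now; presumably so that the
         * first write raises fast_data_access_protection(), which marks
         * the PTE dirty before remapping the page writable.
         */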
        dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
        //dtsb_pte_copy(t, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
                __func__);
        }
    }
}

/** DTLB protection fault handler.
 *
 * @param page_and_ctx A 64-bit value describing the fault. The most
 *                     significant 51 bits of the value contain the virtual
 *                     address which caused the fault truncated to the page
 *                     boundary. The least significant 13 bits of the value
 *                     contain the number of the context in which the fault
 *                     occurred.
 * @param istate       Interrupted state saved on the stack.
 */
void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
{
    pte_t *t;
    uintptr_t va = DMISS_ADDRESS(page_and_ctx);
    uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
        dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
        //dtsb_pte_copy(t, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, page_and_ctx,
                __func__);
        }
    }
}

/** Print TLB entry (for debugging purposes).
 *
 * The diag field has been left out in order to make this function more
 * generic (there is no diag field in the US3 architecture).
 *
 * @param i TLB entry number
 * @param t TLB entry tag
 * @param d TLB entry data
 */
#if 0
static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
{
    printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
        "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
#endif

void tlb_print(void)
{
#if 0
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);
        print_tlb_entry(i, t, d);
    }
#endif
}

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    uint64_t page_and_ctx, const char *str)
{
    if (DMISS_CONTEXT(page_and_ctx)) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str,
            DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
    }
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx),
        DMISS_CONTEXT(page_and_ctx));
    panic("%s\n", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    uint64_t page_and_ctx, const char *str)
{
    if (DMISS_CONTEXT(page_and_ctx)) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str,
            DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
    }
    printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx),
        DMISS_CONTEXT(page_and_ctx));
    dump_istate(istate);
    panic("%s\n", str);
}

void describe_mmu_fault(void)
{
}

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
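    /*
     * MMU_DEMAP_ALL removes all non-permanent mappings from both TLBs.
     * Judging by the sun4v hypervisor interface, the two zero arguments
     * are presumably reserved and must be zero.
     */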
    uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
        MMU_FLAG_DTLB | MMU_FLAG_ITLB);
    if (errno != EOK) {
        panic("Error code = %d.\n", (int) errno);
    }
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
#if 0
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
#endif
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out of ITLB and DTLB.
 * @param cnt  Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
#if 0
    unsigned int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
            page + i * MMU_PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
#endif
}

/** @}
 */