Subversion Repositories HelenOS

Rev 3993 → Rev 4129 (unified diff: '-' marks lines removed in 4129, '+' marks lines added)
 /*
  * Copyright (c) 2005 Jakub Jermar
  * Copyright (c) 2008 Pavel Rimsky
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
  * are met:
  *
  * - Redistributions of source code must retain the above copyright
  *   notice, this list of conditions and the following disclaimer.
  * - Redistributions in binary form must reproduce the above copyright
  *   notice, this list of conditions and the following disclaimer in the
  *   documentation and/or other materials provided with the distribution.
  * - The name of the author may not be used to endorse or promote products
  *   derived from this software without specific prior written permission.
  *
  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */

 /** @addtogroup sparc64mm
  * @{
  */
 /** @file
  */

 #include <mm/tlb.h>
 #include <mm/as.h>
 #include <mm/asid.h>
 #include <arch/sun4v/hypercall.h>
 #include <arch/mm/frame.h>
 #include <arch/mm/page.h>
 #include <arch/mm/tte.h>
 #include <arch/mm/tlb.h>
 #include <arch/interrupt.h>
 #include <interrupt.h>
 #include <arch.h>
 #include <print.h>
 #include <arch/types.h>
 #include <config.h>
 #include <arch/trap/trap.h>
 #include <arch/trap/exception.h>
 #include <panic.h>
 #include <arch/asm.h>
 #include <arch/cpu.h>
 #include <arch/mm/pagesize.h>

 #ifdef CONFIG_TSB
 #include <arch/mm/tsb.h>
 #endif

 static void itlb_pte_copy(pte_t *);
 static void dtlb_pte_copy(pte_t *, bool);
 static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
 static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
     const char *);
 static void do_fast_data_access_protection_fault(istate_t *,
     uint64_t, const char *);

-#if 0
-char *context_encoding[] = {
-    "Primary",
-    "Secondary",
-    "Nucleus",
-    "Reserved"
-};
-#endif
-
 /*
  * The assembly language routine passes a 64-bit parameter to the Data Access
  * MMU Miss and Data Access protection handlers. The parameter encapsulates
  * the virtual address of the faulting page and the faulting context. The most
  * significant 51 bits represent the VA of the faulting page and the least
  * significant 13 bits represent the faulting context. The following macros
  * extract the page and context out of the 64-bit parameter:
  */

 /* extracts the VA of the faulting page */
 #define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13)

 /* extracts the faulting context */
 #define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)
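
As a quick illustration of how these two macros split a fault descriptor, here is a standalone sketch (the input value is made up for the example, not taken from a real fault):

    #include <stdint.h>
    #include <stdio.h>

    #define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13)
    #define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)

    int main(void)
    {
        /* Hypothetical descriptor: VA 0x40002000, context 5. */
        uint64_t page_and_ctx = 0x40002005ULL;

        /* Prints: page = 0x40002000, ctx = 0x5 */
        printf("page = %#llx, ctx = %#llx\n",
            (unsigned long long) DMISS_ADDRESS(page_and_ctx),
            (unsigned long long) DMISS_CONTEXT(page_and_ctx));
        return 0;
    }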

+/**
+ * Descriptions of fault types from the MMU Fault status area.
+ *
+ * fault_types[i] contains the description of the error for which the IFT or
+ * DFT field of the MMU fault status area is i.
+ */
+char *fault_types[] = {
+    "unknown",
+    "fast miss",
+    "fast protection",
+    "MMU miss",
+    "invalid RA",
+    "privileged violation",
+    "protection violation",
+    "NFO access",
+    "so page/NFO side effect",
+    "invalid VA",
+    "invalid ASI",
+    "nc atomic",
+    "privileged action",
+    "unknown",
+    "unaligned access",
+    "invalid page size"
+};
+
+/** Array of MMU fault status areas. */
+extern mmu_fault_status_area_t mmu_fsas[MAX_NUM_STRANDS];
+
 /*
  * Invalidate all non-locked DTLB and ITLB entries.
  */
 void tlb_arch_init(void)
 {
     tlb_invalidate_all();
 }

 /** Insert privileged mapping into DMMU TLB.
  *
  * @param page      Virtual page address.
  * @param frame     Physical frame address.
  * @param pagesize  Page size.
  * @param locked    True for permanent mappings, false otherwise.
  * @param cacheable True if the mapping is cacheable, false otherwise.
  */
 void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
     bool locked, bool cacheable)
 {
-#if 0
-    tlb_tag_access_reg_t tag;
-    tlb_data_t data;
-    page_address_t pg;
-    frame_address_t fr;
-
-    pg.address = page;
-    fr.address = frame;
-
-    tag.context = ASID_KERNEL;
-    tag.vpn = pg.vpn;
-
-    dtlb_tag_access_write(tag.value);
-
-    data.value = 0;
-    data.v = true;
-    data.size = pagesize;
-    data.pfn = fr.pfn;
-    data.l = locked;
-    data.cp = cacheable;
-#ifdef CONFIG_VIRT_IDX_DCACHE
-    data.cv = cacheable;
-#endif /* CONFIG_VIRT_IDX_DCACHE */
-    data.p = true;
-    data.w = true;
-    data.g = false;
-
-    dtlb_data_in_write(data.value);
-#endif
+    tte_data_t data;
+
+    data.value = 0;
+    data.v = true;
+    data.nfo = false;
+    data.ra = frame >> FRAME_WIDTH;
+    data.ie = false;
+    data.e = false;
+    data.cp = cacheable;
+#ifdef CONFIG_VIRT_IDX_DCACHE
+    data.cv = cacheable;
+#endif
+    data.p = true;
+    data.x = false;
+    data.w = false;
+    data.size = pagesize;
+
+    if (locked) {
+        __hypercall_fast4(
+            MMU_MAP_PERM_ADDR, page, 0, data.value, MMU_FLAG_DTLB);
+    } else {
+        __hypercall_hyperfast(
+            page, ASID_KERNEL, data.value, MMU_FLAG_DTLB, 0,
+            MMU_MAP_ADDR);
+    }
 }
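
The locked flag selects between the two sun4v mapping hypercalls above: MMU_MAP_PERM_ADDR installs a permanent translation, while MMU_MAP_ADDR installs an ordinary, evictable one. A hedged usage sketch (the addresses are hypothetical, not from the source):

    /* Pin a permanent, cacheable 8K kernel mapping of physical frame
     * 0x1800000 at virtual address 0x400000 (hypothetical addresses). */
    dtlb_insert_mapping(0x400000, 0x1800000, PAGESIZE_8K, true, true);

    /* The same mapping installed as an ordinary (evictable) entry. */
    dtlb_insert_mapping(0x400000, 0x1800000, PAGESIZE_8K, false, true);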

 /** Copy PTE to TLB.
  *
  * @param t         Page Table Entry to be copied.
  * @param ro        If true, the entry will be created read-only, regardless
  *          of its w field.
  */
 void dtlb_pte_copy(pte_t *t, bool ro)
 {
     tte_data_t data;

     data.value = 0;
     data.v = true;
     data.nfo = false;
     data.ra = (t->frame) >> FRAME_WIDTH;
     data.ie = false;
     data.e = false;
     data.cp = t->c;
 #ifdef CONFIG_VIRT_IDX_DCACHE
     data.cv = t->c;
 #endif
     data.p = t->k;
     data.x = false;
     data.w = ro ? false : t->w;
     data.size = PAGESIZE_8K;

     __hypercall_hyperfast(
         t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR);
 }

 /** Copy PTE to ITLB.
  *
  * @param t     Page Table Entry to be copied.
  */
 void itlb_pte_copy(pte_t *t)
 {
     tte_data_t data;

     data.value = 0;
     data.v = true;
     data.nfo = false;
     data.ra = (t->frame) >> FRAME_WIDTH;
     data.ie = false;
     data.e = false;
     data.cp = t->c;
     data.cv = false;
     data.p = t->k;
     data.x = true;
     data.w = false;
     data.size = PAGESIZE_8K;

     __hypercall_hyperfast(
         t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
 }

 /** ITLB miss handler. */
 void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
 {
     uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
     pte_t *t;

     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);

     if (t && PTE_EXECUTABLE(t)) {
         /*
          * The mapping was found in the software page hash table.
          * Insert it into ITLB.
          */
         t->a = true;
         itlb_pte_copy(t);
 #ifdef CONFIG_TSB
-        //itsb_pte_copy(t, index);
+        itsb_pte_copy(t);
 #endif
         page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to the address space page fault
          * handler.
          */
         page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
             do_fast_instruction_access_mmu_miss_fault(istate,
                 __func__);
         }
     }
 }

 /** DTLB miss handler.
  *
  * Note that some faults (e.g. kernel faults) were already resolved by the
  * low-level, assembly language part of the fast_data_access_mmu_miss handler.
  *
  * @param page_and_ctx  A 64-bit value describing the fault. The most
  *          significant 51 bits of the value contain the virtual
  *          address which caused the fault truncated to the page
  *          boundary. The least significant 13 bits of the value
  *          contain the number of the context in which the fault
  *          occurred.
  * @param istate    Interrupted state saved on the stack.
  */
 void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
 {
     pte_t *t;
     uintptr_t va = DMISS_ADDRESS(page_and_ctx);
     uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

     if (ctx == ASID_KERNEL) {
         if (va == 0) {
             /* NULL access in kernel */
             do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
                 __func__);
         }
         do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected "
             "kernel page fault.");
     }

     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
     if (t) {
         /*
          * The mapping was found in the software page hash table.
          * Insert it into DTLB.
          */
         t->a = true;
         dtlb_pte_copy(t, true);
 #ifdef CONFIG_TSB
-        //dtsb_pte_copy(t, true);
+        dtsb_pte_copy(t, true);
 #endif
         page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to the address space page fault
          * handler.
          */
         page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
             do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
                 __func__);
         }
     }
 }

 /** DTLB protection fault handler.
  *
  * @param page_and_ctx  A 64-bit value describing the fault. The most
  *          significant 51 bits of the value contain the virtual
  *          address which caused the fault truncated to the page
  *          boundary. The least significant 13 bits of the value
  *          contain the number of the context in which the fault
  *          occurred.
  * @param istate    Interrupted state saved on the stack.
  */
 void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
 {
     pte_t *t;
     uintptr_t va = DMISS_ADDRESS(page_and_ctx);
     uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

     page_table_lock(AS, true);
     t = page_mapping_find(AS, va);
     if (t && PTE_WRITABLE(t)) {
         /*
          * The mapping was found in the software page hash table and is
          * writable. Demap the old mapping and insert an updated mapping
          * into DTLB.
          */
         t->a = true;
         t->d = true;
         mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
         dtlb_pte_copy(t, false);
 #ifdef CONFIG_TSB
-        //dtsb_pte_copy(t, false);
+        dtsb_pte_copy(t, false);
 #endif
         page_table_unlock(AS, true);
     } else {
         /*
          * Forward the page fault to the address space page fault
          * handler.
          */
         page_table_unlock(AS, true);
         if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
             do_fast_data_access_protection_fault(istate, page_and_ctx,
                 __func__);
         }
     }
 }

-/** Print TLB entry (for debugging purposes).
- *
- * The diag field has been left out in order to make this function more generic
- * (there is no diag field in US3 architecture).
- *
- * @param i     TLB entry number
- * @param t     TLB entry tag
- * @param d     TLB entry data
- */
-#if 0
-static void print_tlb_entry(int i, tlb_tag_read_reg_t t, tlb_data_t d)
-{
-    printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
-        "ie=%d, soft2=%#x, pfn=%#x, soft=%#x, l=%d, "
-        "cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
-        t.context, d.v, d.size, d.nfo, d.ie, d.soft2,
-        d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
-}
-#endif
-
+/*
+ * On Niagara this function does not work, as supervisor software is isolated
+ * from the TLB by the hypervisor and has no chance to investigate the TLB
+ * entries.
+ */
 void tlb_print(void)
 {
-#if 0
-    int i;
-    tlb_data_t d;
-    tlb_tag_read_reg_t t;
-
-    printf("I-TLB contents:\n");
-    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
-        d.value = itlb_data_access_read(i);
-        t.value = itlb_tag_read_read(i);
-        print_tlb_entry(i, t, d);
-    }
-
-    printf("D-TLB contents:\n");
-    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
-        d.value = dtlb_data_access_read(i);
-        t.value = dtlb_tag_read_read(i);
-        print_tlb_entry(i, t, d);
-    }
-#endif
+    printf("Operation not possible on Niagara.\n");
 }

 void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
     const char *str)
 {
     fault_if_from_uspace(istate, "%s\n", str);
     dump_istate(istate);
     panic("%s\n", str);
 }

 void do_fast_data_access_mmu_miss_fault(istate_t *istate,
     uint64_t page_and_ctx, const char *str)
 {
     if (DMISS_CONTEXT(page_and_ctx)) {
         fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str,
             DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
     }
     dump_istate(istate);
     printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx),
         DMISS_CONTEXT(page_and_ctx));
     panic("%s\n", str);
 }

 void do_fast_data_access_protection_fault(istate_t *istate,
     uint64_t page_and_ctx, const char *str)
 {
     if (DMISS_CONTEXT(page_and_ctx)) {
         fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str,
             DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
     }
     printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx),
         DMISS_CONTEXT(page_and_ctx));
     dump_istate(istate);
     panic("%s\n", str);
 }

+/**
+ * Describes the exact condition which caused the last DMMU fault.
+ */
-void describe_mmu_fault(void)
+void describe_dmmu_fault(void)
 {
+    uint64_t myid;
+    __hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);
+
+    ASSERT(mmu_fsas[myid].dft < 16);
+
+    printf("condition which caused the fault: %s\n",
+        fault_types[mmu_fsas[myid].dft]);
 }

 /** Invalidate all unlocked ITLB and DTLB entries. */
 void tlb_invalidate_all(void)
 {
     uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
         MMU_FLAG_DTLB | MMU_FLAG_ITLB);
     if (errno != EOK) {
         panic("Error code = %d.\n", errno);
     }
 }

 /** Invalidate all ITLB and DTLB entries that belong to specified ASID
  * (Context).
  *
  * @param asid Address Space ID.
  */
 void tlb_invalidate_asid(asid_t asid)
 {
-#if 0
-    tlb_context_reg_t pc_save, ctx;
-
     /* switch to nucleus because we are mapped by the primary context */
     nucleus_enter();

-    ctx.v = pc_save.v = mmu_primary_context_read();
-    ctx.context = asid;
-    mmu_primary_context_write(ctx.v);
-
-    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
-    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
-
-    mmu_primary_context_write(pc_save.v);
+    __hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
+        MMU_FLAG_ITLB | MMU_FLAG_DTLB);

     nucleus_leave();
-#endif
 }

 /** Invalidate all ITLB and DTLB entries for specified page range in specified
  * address space.
  *
  * @param asid      Address Space ID.
  * @param page      First page to sweep out of ITLB and DTLB.
  * @param cnt       Number of ITLB and DTLB entries to invalidate.
  */
 void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
 {
-#if 0
     unsigned int i;
-    tlb_context_reg_t pc_save, ctx;

     /* switch to nucleus because we are mapped by the primary context */
     nucleus_enter();

-    ctx.v = pc_save.v = mmu_primary_context_read();
-    ctx.context = asid;
-    mmu_primary_context_write(ctx.v);
-
-    for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
-        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
-            page + i * MMU_PAGE_SIZE);
-        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
-            page + i * MMU_PAGE_SIZE);
+    for (i = 0; i < cnt; i++) {
+        /* Demap the i-th page of the range; the address passed to the
+         * hypervisor must advance with i. */
+        __hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page + i * PAGE_SIZE,
+            asid, MMU_FLAG_DTLB | MMU_FLAG_ITLB);
     }
-
-    mmu_primary_context_write(pc_save.v);

     nucleus_leave();
-#endif
 }
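
For context, a sketch of how these invalidation routines might be called (the ASID and addresses are hypothetical):

    /* After unmapping three consecutive pages from the address space
     * with ASID 7, evict any stale ITLB/DTLB entries covering them. */
    tlb_invalidate_pages(7, 0x80000000, 3);

    /* When the whole context is torn down, a single MMU_DEMAP_CTX
     * hypercall replaces the per-page loop. */
    tlb_invalidate_asid(7);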

 /** @}
  */
452