Subversion Repositories HelenOS

Rev 4129 → Rev 4130 (sole functional change: dtlb_insert_mapping() now sets data.w = true; the listing below is rev 4130)
/*
 * Copyright (c) 2005 Jakub Jermar
 * Copyright (c) 2008 Pavel Rimsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/sun4v/hypercall.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/tte.h>
#include <arch/mm/tlb.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>
#include <arch/cpu.h>
#include <arch/mm/pagesize.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void itlb_pte_copy(pte_t *);
static void dtlb_pte_copy(pte_t *, bool);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *);
static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t,
    const char *);
static void do_fast_data_access_protection_fault(istate_t *,
    uint64_t, const char *);

/*
 * The assembly language routine passes a 64-bit parameter to the Data Access
 * MMU Miss and Data Access Protection handlers; the parameter encapsulates
 * the virtual address of the faulting page and the faulting context. The most
 * significant 51 bits represent the VA of the faulting page and the least
 * significant 13 bits represent the faulting context. The following macros
 * extract the page and context out of the 64-bit parameter:
 */

/* extracts the VA of the faulting page */
#define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13)

/* extracts the faulting context */
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff)
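
/*
 * Worked example (hypothetical value): for page_and_ctx == 0x400000c005,
 * DMISS_ADDRESS() yields 0x400000c000 (the low 13 bits cleared) and
 * DMISS_CONTEXT() yields 0x5 (the low 13 bits alone).
 */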

/**
 * Descriptions of fault types from the MMU fault status area.
 *
 * fault_types[i] contains the description of the error for which the IFT or
 * DFT field of the MMU fault status area is i.
 */
char *fault_types[] = {
    "unknown",
    "fast miss",
    "fast protection",
    "MMU miss",
    "invalid RA",
    "privileged violation",
    "protection violation",
    "NFO access",
    "so page/NFO side effect",
    "invalid VA",
    "invalid ASI",
    "nc atomic",
    "privileged action",
    "unknown",
    "unaligned access",
    "invalid page size"
};
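
/*
 * For instance, a DFT value of 6 in the fault status area is reported as
 * fault_types[6], i.e. "protection violation".
 */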

/** Array of MMU fault status areas. */
extern mmu_fault_status_area_t mmu_fsas[MAX_NUM_STRANDS];

/*
 * Invalidate all non-locked DTLB and ITLB entries.
 */
void tlb_arch_init(void)
{
    tlb_invalidate_all();
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page      Virtual page address.
 * @param frame     Physical frame address.
 * @param pagesize  Page size.
 * @param locked    True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
    bool locked, bool cacheable)
{
    tte_data_t data;

    data.value = 0;
    data.v = true;
    data.nfo = false;
    data.ra = frame >> FRAME_WIDTH;
    data.ie = false;
    data.e = false;
    data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = cacheable;
#endif
    data.p = true;
    data.x = false;
    data.w = true;
    data.size = pagesize;

    if (locked) {
        __hypercall_fast4(
            MMU_MAP_PERM_ADDR, page, 0, data.value, MMU_FLAG_DTLB);
    } else {
        __hypercall_hyperfast(
            page, ASID_KERNEL, data.value, MMU_FLAG_DTLB, 0,
            MMU_MAP_ADDR);
    }
}
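
/*
 * A hypothetical call, for illustration only: create a permanent, cacheable
 * 8K kernel mapping of physical frame 0x4000000 at virtual page 0x2000000:
 *
 *     dtlb_insert_mapping(0x2000000, 0x4000000, PAGESIZE_8K, true, true);
 */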

/** Copy PTE to TLB.
 *
 * @param t         Page Table Entry to be copied.
 * @param ro        If true, the entry will be created read-only, regardless
 *          of its w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
    tte_data_t data;

    data.value = 0;
    data.v = true;
    data.nfo = false;
    data.ra = (t->frame) >> FRAME_WIDTH;
    data.ie = false;
    data.e = false;
    data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
    data.cv = t->c;
#endif
    data.p = t->k;
    data.x = false;
    data.w = ro ? false : t->w;
    data.size = PAGESIZE_8K;

    __hypercall_hyperfast(
        t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR);
}

/** Copy PTE to ITLB.
 *
 * @param t     Page Table Entry to be copied.
 */
void itlb_pte_copy(pte_t *t)
{
    tte_data_t data;

    data.value = 0;
    data.v = true;
    data.nfo = false;
    data.ra = (t->frame) >> FRAME_WIDTH;
    data.ie = false;
    data.e = false;
    data.cp = t->c;
    data.cv = false;
    data.p = t->k;
    data.x = true;
    data.w = false;
    data.size = PAGESIZE_8K;

    __hypercall_hyperfast(
        t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);

    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t);
#ifdef CONFIG_TSB
        itsb_pte_copy(t);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate,
                __func__);
        }
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved by the
 * low-level, assembly language part of the fast_data_access_mmu_miss handler.
 *
 * @param page_and_ctx  A 64-bit value describing the fault. The most
 *          significant 51 bits of the value contain the virtual
 *          address which caused the fault truncated to the page
 *          boundary. The least significant 13 bits of the value
 *          contain the number of the context in which the fault
 *          occurred.
 * @param istate    Interrupted state saved on the stack.
 */
void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate)
{
    pte_t *t;
    uintptr_t va = DMISS_ADDRESS(page_and_ctx);
    uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

    if (ctx == ASID_KERNEL) {
        if (va == 0) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
                __func__);
        }
        do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
            "Unexpected kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
        dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, page_and_ctx,
                __func__);
        }
    }
}

/** DTLB protection fault handler.
 *
 * @param page_and_ctx  A 64-bit value describing the fault. The most
 *          significant 51 bits of the value contain the virtual
 *          address which caused the fault truncated to the page
 *          boundary. The least significant 13 bits of the value
 *          contain the number of the context in which the fault
 *          occurred.
 * @param istate    Interrupted state saved on the stack.
 */
void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate)
{
    pte_t *t;
    uintptr_t va = DMISS_ADDRESS(page_and_ctx);
    uint16_t ctx = DMISS_CONTEXT(page_and_ctx);

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is
         * writable. Demap the old mapping and insert an updated mapping
         * into DTLB.
         */
        t->a = true;
        t->d = true;
        mmu_demap_page(va, ctx, MMU_FLAG_DTLB);
        dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault
         * handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, page_and_ctx,
                __func__);
        }
    }
}

/*
 * On Niagara this function does not work, as supervisor software is isolated
 * from the TLB by the hypervisor and has no chance to investigate the TLB
 * entries.
 */
void tlb_print(void)
{
    printf("Operation not possible on Niagara.\n");
}

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
    const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate,
    uint64_t page_and_ctx, const char *str)
{
    if (DMISS_CONTEXT(page_and_ctx)) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str,
            DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
    }
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx),
        DMISS_CONTEXT(page_and_ctx));
    panic("%s\n", str);
}

void do_fast_data_access_protection_fault(istate_t *istate,
    uint64_t page_and_ctx, const char *str)
{
    if (DMISS_CONTEXT(page_and_ctx)) {
        fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str,
            DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx));
    }
    printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx),
        DMISS_CONTEXT(page_and_ctx));
    dump_istate(istate);
    panic("%s\n", str);
}

/**
 * Describes the exact condition which caused the last DMMU fault.
 */
void describe_dmmu_fault(void)
{
    uint64_t myid;
    __hypercall_fast_ret1(0, 0, 0, 0, 0, CPU_MYID, &myid);

    ASSERT(mmu_fsas[myid].dft < 16);

    printf("condition which caused the fault: %s\n",
        fault_types[mmu_fsas[myid].dft]);
}

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    uint64_t errno = __hypercall_fast3(MMU_DEMAP_ALL, 0, 0,
        MMU_FLAG_DTLB | MMU_FLAG_ITLB);
    if (errno != EOK) {
        panic("Error code = %d.\n", errno);
    }
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID
 * (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    __hypercall_fast4(MMU_DEMAP_CTX, 0, 0, asid,
        MMU_FLAG_ITLB | MMU_FLAG_DTLB);

    nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified
 * address space.
 *
 * @param asid      Address Space ID.
 * @param page      First page which to sweep out from ITLB and DTLB.
 * @param cnt       Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    unsigned int i;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    for (i = 0; i < cnt; i++) {
        __hypercall_fast5(MMU_DEMAP_PAGE, 0, 0, page + i * PAGE_SIZE,
            asid, MMU_FLAG_DTLB | MMU_FLAG_ITLB);
    }

    nucleus_leave();
}
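
/*
 * For example (hypothetical values): tlb_invalidate_pages(asid, va, 3)
 * demaps the pages at va, va + PAGE_SIZE and va + 2 * PAGE_SIZE from both
 * the ITLB and the DTLB.
 */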

/** @}
 */