Subversion Repositories HelenOS

Rev 1891 → Rev 1905

/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <typedefs.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <arch/trap/exception.h>
#include <panic.h>
#include <arch/asm.h>

#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif

static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);

char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};

void tlb_arch_init(void)
{
    /*
-    * TLBs are actually initialized early
-    * in start.S.
+    * Invalidate all non-locked DTLB and ITLB entries.
     */
+   tlb_invalidate_all();
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param pagesize Page size.
 * @param locked True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.value = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
    data.cv = cacheable;
    data.p = true;
    data.w = true;
    data.g = false;

    dtlb_data_in_write(data.value);
}
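
/*
 * Usage sketch (hypothetical addresses, for illustration only): a caller
 * could establish a permanent, non-cacheable 8K kernel mapping, e.g. for a
 * memory-mapped device, like this:
 *
 *     dtlb_insert_mapping(0x400000, 0x1ff00000000, PAGESIZE_8K,
 *         true, false);
 */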

/** Copy PTE to DTLB.
 *
 * @param t Page Table Entry to be copied.
 * @param ro If true, the entry will be created read-only, regardless of its w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page;
    fr.address = t->frame;

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.cv = t->c;
    data.p = t->k;      /* p like privileged */
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}

/** Copy PTE to ITLB.
 *
 * @param t Page Table Entry to be copied.
 */
void itlb_pte_copy(pte_t *t)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page;
    fr.address = t->frame;

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.cv = t->c;
    data.p = t->k;      /* p like privileged */
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
{
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t);
#ifdef CONFIG_TSB
        itsb_pte_copy(t);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate, __FUNCTION__);
        }
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved
 * by the low-level, assembly language part of the fast_data_access_mmu_miss
 * handler.
 *
 * The mapping is inserted into the DTLB read-only (dtlb_pte_copy(t, true));
 * the first write to the page then raises fast_data_access_protection(),
 * which marks the PTE dirty and reinstalls the mapping as writable.
 */
void fast_data_access_mmu_miss(int n, istate_t *istate)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
    pte_t *t;

    tag.value = dtlb_tag_access_read();
    va = tag.vpn << PAGE_WIDTH;

    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
        }
        do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
        dtlb_pte_copy(t, true);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, true);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
        }
    }
}

/** DTLB protection fault handler. */
void fast_data_access_protection(int n, istate_t *istate)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
    pte_t *t;

    tag.value = dtlb_tag_access_read();
    va = tag.vpn << PAGE_WIDTH;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is writable.
         * Demap the old mapping and insert an updated mapping into DTLB.
         */
        t->a = true;
        t->d = true;
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
        dtlb_pte_copy(t, false);
#ifdef CONFIG_TSB
        dtsb_pte_copy(t, false);
#endif
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, tag, __FUNCTION__);
        }
    }
}

/** Print contents of both TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);

        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
            i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);

        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
            i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    }
}
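
/*
 * Rough legend for the TTE fields printed above, following the UltraSPARC
 * TTE format (interpretation assumed, not taken from this file):
 * v = valid, size = page size, nfo = no-fault-only, ie = invert endianness,
 * soft/soft2 = software-defined bits, diag = diagnostic bits,
 * pfn = physical frame number, l = locked, cp/cv = cacheable in
 * physically/virtually indexed caches, e = side-effect, p = privileged,
 * w = writable, g = global.
 */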

void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str)
{
    fault_if_from_uspace(istate, "%s\n", str);
    dump_istate(istate);
    panic("%s\n", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << PAGE_WIDTH;

    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, tag.context);
    dump_istate(istate);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    panic("%s\n", str);
}

void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
{
    uintptr_t va;

    va = tag.vpn << PAGE_WIDTH;

    fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, tag.context);
    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    dump_istate(istate);
    panic("%s\n", str);
}

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        if (!d.l) {
            t.value = itlb_tag_read_read(i);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(i, d.value);
        }
    }

    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        if (!d.l) {
            t.value = dtlb_tag_read_read(i);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(i, d.value);
        }
    }
}

/** Invalidate all ITLB and DTLB entries that belong to specified ASID (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified address space.
 *
 * @param asid Address Space ID.
 * @param page First page to be swept out of the ITLB and DTLB.
 * @param cnt Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    int i;
    tlb_context_reg_t pc_save, ctx;

    /* switch to nucleus because we are mapped by the primary context */
    nucleus_enter();

    ctx.v = pc_save.v = mmu_primary_context_read();
    ctx.context = asid;
    mmu_primary_context_write(ctx.v);

    for (i = 0; i < cnt; i++) {
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
    }

    mmu_primary_context_write(pc_save.v);

    nucleus_leave();
}
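
/*
 * Usage sketch (hypothetical values, for illustration only): sweep a
 * four-page range belonging to ASID 7 out of both TLBs:
 *
 *     tlb_invalidate_pages(7, 0x80000000, 4);
 */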

/** @}
 */