HelenOS Subversion repository: sparc64 TLB code, Rev 1852 compared with Rev 1859.

The only changes in Rev 1859 are the new prototype and definition of do_fast_data_access_protection_fault() and the replacement of the panic() stub in fast_data_access_protection() with a full DTLB protection fault handler. The listing below is Rev 1859.
/*
 * Copyright (C) 2005 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup sparc64mm
 * @{
 */
/** @file
 */

#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <arch/interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <typedefs.h>
#include <config.h>
#include <arch/trap/trap.h>
#include <panic.h>
#include <arch/asm.h>
#include <symtab.h>

static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate, const char *str);

char *context_encoding[] = {
    "Primary",
    "Secondary",
    "Nucleus",
    "Reserved"
};

void tlb_arch_init(void)
{
    /*
     * TLBs are actually initialized early
     * in start.S.
     */
}

/** Insert privileged mapping into DMMU TLB.
 *
 * @param page Virtual page address.
 * @param frame Physical frame address.
 * @param pagesize Page size.
 * @param locked True for permanent mappings, false otherwise.
 * @param cacheable True if the mapping is cacheable, false otherwise.
 */
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = page;
    fr.address = frame;

    tag.value = ASID_KERNEL;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = pagesize;
    data.pfn = fr.pfn;
    data.l = locked;
    data.cp = cacheable;
    data.cv = cacheable;
    data.p = true;
    data.w = true;
    data.g = true;

    dtlb_data_in_write(data.value);
}
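
/*
 * Example (illustration only, hypothetical addresses): a locked, cacheable
 * 8K kernel mapping that survives tlb_invalidate_all() could be pinned
 * like this:
 *
 *     dtlb_insert_mapping(0x400000, 0x800000, PAGESIZE_8K, true, true);
 */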

/** Copy PTE to TLB.
 *
 * @param t Page Table Entry to be copied.
 * @param ro If true, the entry will be created read-only, regardless of its w field.
 */
void dtlb_pte_copy(pte_t *t, bool ro)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page;
    fr.address = t->frame;

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    dtlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.cv = t->c;
    data.p = t->p;
    data.w = ro ? false : t->w;
    data.g = t->g;

    dtlb_data_in_write(data.value);
}
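
/** Copy PTE to ITLB.
 *
 * Unlike dtlb_pte_copy(), the entry is always created non-writable.
 *
 * @param t Page Table Entry to be copied.
 */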
void itlb_pte_copy(pte_t *t)
{
    tlb_tag_access_reg_t tag;
    tlb_data_t data;
    page_address_t pg;
    frame_address_t fr;

    pg.address = t->page;
    fr.address = t->frame;

    tag.value = 0;
    tag.context = t->as->asid;
    tag.vpn = pg.vpn;

    itlb_tag_access_write(tag.value);

    data.value = 0;
    data.v = true;
    data.size = PAGESIZE_8K;
    data.pfn = fr.pfn;
    data.l = false;
    data.cp = t->c;
    data.cv = t->c;
    data.p = t->p;
    data.w = false;
    data.g = t->g;

    itlb_data_in_write(data.value);
}

/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
{
    uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
    pte_t *t;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_EXECUTABLE(t)) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into ITLB.
         */
        t->a = true;
        itlb_pte_copy(t);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
            do_fast_instruction_access_mmu_miss_fault(istate, __FUNCTION__);
        }
    }
}

/** DTLB miss handler.
 *
 * Note that some faults (e.g. kernel faults) were already resolved
 * by the low-level, assembly language part of the fast_data_access_mmu_miss
 * handler.
 */
void fast_data_access_mmu_miss(int n, istate_t *istate)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
    pte_t *t;

    tag.value = dtlb_tag_access_read();
    va = tag.vpn * PAGE_SIZE;
    if (tag.context == ASID_KERNEL) {
        if (!tag.vpn) {
            /* NULL access in kernel */
            do_fast_data_access_mmu_miss_fault(istate, __FUNCTION__);
        }
        do_fast_data_access_mmu_miss_fault(istate, "Unexpected kernel page fault.");
    }

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t) {
        /*
         * The mapping was found in the software page hash table.
         * Insert it into DTLB.
         */
        t->a = true;
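        /*
         * The mapping is inserted read-only; the first write to the page
         * will raise fast_data_access_protection(), which sets the dirty
         * bit and reinserts the mapping as writable.
         */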
        dtlb_pte_copy(t, true);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
            do_fast_data_access_mmu_miss_fault(istate, __FUNCTION__);
        }
    }
}

/** DTLB protection fault handler. */
void fast_data_access_protection(int n, istate_t *istate)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
    pte_t *t;

    tag.value = dtlb_tag_access_read();
    va = tag.vpn * PAGE_SIZE;

    page_table_lock(AS, true);
    t = page_mapping_find(AS, va);
    if (t && PTE_WRITABLE(t)) {
        /*
         * The mapping was found in the software page hash table and is writable.
         * Demap the old mapping and insert an updated mapping into DTLB.
         */
        t->a = true;
        t->d = true;
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
        dtlb_pte_copy(t, false);
        page_table_unlock(AS, true);
    } else {
        /*
         * Forward the page fault to the address space page fault handler.
         */
        page_table_unlock(AS, true);
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
            do_fast_data_access_protection_fault(istate, __FUNCTION__);
        }
    }
}

/** Print contents of both TLBs. */
void tlb_print(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    printf("I-TLB contents:\n");
    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        t.value = itlb_tag_read_read(i);

        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
            i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    }

    printf("D-TLB contents:\n");
    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        t.value = dtlb_tag_read_read(i);

        printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n",
            i, t.vpn, t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
    }

}
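
/** Report an unresolvable ITLB miss: print the faulting TPC and panic. */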
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str)
{
    char *tpc_str = get_symtab_entry(istate->tpc);

    printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
    panic("%s\n", str);
}
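
/** Report an unresolvable DTLB miss: print the faulting page, its context and the TPC, then panic. */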
void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
    char *tpc_str = get_symtab_entry(istate->tpc);

    tag.value = dtlb_tag_access_read();
    va = tag.vpn * PAGE_SIZE;

    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
    panic("%s\n", str);
}
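
/** Like do_fast_data_access_mmu_miss_fault(), but reported for DTLB protection faults. */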
void do_fast_data_access_protection_fault(istate_t *istate, const char *str)
{
    tlb_tag_access_reg_t tag;
    uintptr_t va;
    char *tpc_str = get_symtab_entry(istate->tpc);

    tag.value = dtlb_tag_access_read();
    va = tag.vpn * PAGE_SIZE;

    printf("Faulting page: %p, ASID=%d\n", va, tag.context);
    printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
    panic("%s\n", str);
}

/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
    int i;
    tlb_data_t d;
    tlb_tag_read_reg_t t;

    for (i = 0; i < ITLB_ENTRY_COUNT; i++) {
        d.value = itlb_data_access_read(i);
        if (!d.l) {
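            /* Preserve the entry's tag and write its data word back with the valid bit cleared. */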
            t.value = itlb_tag_read_read(i);
            d.v = false;
            itlb_tag_access_write(t.value);
            itlb_data_access_write(i, d.value);
        }
    }

    for (i = 0; i < DTLB_ENTRY_COUNT; i++) {
        d.value = dtlb_data_access_read(i);
        if (!d.l) {
            t.value = dtlb_tag_read_read(i);
            d.v = false;
            dtlb_tag_access_write(t.value);
            dtlb_data_access_write(i, d.value);
        }
    }

}

/** Invalidate all ITLB and DTLB entries that belong to the specified ASID (Context).
 *
 * @param asid Address Space ID.
 */
void tlb_invalidate_asid(asid_t asid)
{
    /* TODO: write asid to some Context register and encode the register in second parameter below. */
    itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_NUCLEUS, 0);
    dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_NUCLEUS, 0);
}

/** Invalidate all ITLB and DTLB entries for the specified page range in the specified address space.
 *
 * @param asid Address Space ID.
 * @param page First page to sweep out of the ITLB and DTLB.
 * @param cnt Number of ITLB and DTLB entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
    int i;

    for (i = 0; i < cnt; i++) {
        /* TODO: write asid to some Context register and encode the register in second parameter below. */
        itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, page + i * PAGE_SIZE);
        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, page + i * PAGE_SIZE);
    }
}

/** @}
 */