Subversion Repositories HelenOS

Rev

Rev 394 | Rev 396 | Go to most recent revision | Only display areas with differences | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed

Rev 394 Rev 395
1
/*
1
/*
2
 * Copyright (C) 2003-2004 Jakub Jermar
2
 * Copyright (C) 2003-2004 Jakub Jermar
3
 * All rights reserved.
3
 * All rights reserved.
4
 *
4
 *
5
 * Redistribution and use in source and binary forms, with or without
5
 * Redistribution and use in source and binary forms, with or without
6
 * modification, are permitted provided that the following conditions
6
 * modification, are permitted provided that the following conditions
7
 * are met:
7
 * are met:
8
 *
8
 *
9
 * - Redistributions of source code must retain the above copyright
9
 * - Redistributions of source code must retain the above copyright
10
 *   notice, this list of conditions and the following disclaimer.
10
 *   notice, this list of conditions and the following disclaimer.
11
 * - Redistributions in binary form must reproduce the above copyright
11
 * - Redistributions in binary form must reproduce the above copyright
12
 *   notice, this list of conditions and the following disclaimer in the
12
 *   notice, this list of conditions and the following disclaimer in the
13
 *   documentation and/or other materials provided with the distribution.
13
 *   documentation and/or other materials provided with the distribution.
14
 * - The name of the author may not be used to endorse or promote products
14
 * - The name of the author may not be used to endorse or promote products
15
 *   derived from this software without specific prior written permission.
15
 *   derived from this software without specific prior written permission.
16
 *
16
 *
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27
 */
27
 */
28
 
28
 
29
#include <arch/mm/tlb.h>
29
#include <arch/mm/tlb.h>
30
#include <arch/mm/asid.h>
30
#include <arch/mm/asid.h>
31
#include <mm/tlb.h>
31
#include <mm/tlb.h>
32
#include <mm/page.h>
32
#include <mm/page.h>
33
#include <mm/vm.h>
33
#include <mm/vm.h>
34
#include <arch/cp0.h>
34
#include <arch/cp0.h>
35
#include <panic.h>
35
#include <panic.h>
36
#include <arch.h>
36
#include <arch.h>
37
#include <symtab.h>
37
#include <symtab.h>
38
#include <synch/spinlock.h>
38
#include <synch/spinlock.h>
39
#include <print.h>
39
#include <print.h>
40
 
40
 
41
static void tlb_refill_fail(struct exception_regdump *pstate);
41
static void tlb_refill_fail(struct exception_regdump *pstate);
42
static void tlb_invalid_fail(struct exception_regdump *pstate);
42
static void tlb_invalid_fail(struct exception_regdump *pstate);
43
static void tlb_modified_fail(struct exception_regdump *pstate);
43
static void tlb_modified_fail(struct exception_regdump *pstate);
44
 
44
 
45
static pte_t *find_mapping_and_check(__address badvaddr);
45
static pte_t *find_mapping_and_check(__address badvaddr);
46
static void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, bool c, __address pfn);
46
static void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, int c, __address pfn);
47
 
47
 
48
/** Initialize TLB
48
/** Initialize TLB
49
 *
49
 *
50
 * Initialize TLB.
50
 * Initialize TLB.
51
 * Invalidate all entries and mark wired entries.
51
 * Invalidate all entries and mark wired entries.
52
 */
52
 */
53
void tlb_init_arch(void)
53
void tlb_init_arch(void)
54
{
54
{
55
    int i;
55
    int i;
56
 
56
 
57
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
57
    cp0_pagemask_write(TLB_PAGE_MASK_16K);
58
    cp0_entry_hi_write(0);
58
    cp0_entry_hi_write(0);
59
    cp0_entry_lo0_write(0);
59
    cp0_entry_lo0_write(0);
60
    cp0_entry_lo1_write(0);
60
    cp0_entry_lo1_write(0);
61
 
61
 
62
    /*
62
    /*
63
     * Invalidate all entries.
63
     * Invalidate all entries.
64
     */
64
     */
65
    for (i = 0; i < TLB_SIZE; i++) {
65
    for (i = 0; i < TLB_SIZE; i++) {
66
        cp0_index_write(i);
66
        cp0_index_write(i);
67
        tlbwi();
67
        tlbwi();
68
    }
68
    }
69
   
69
   
70
    /*
70
    /*
71
     * The kernel is going to make use of some wired
71
     * The kernel is going to make use of some wired
72
     * entries (e.g. mapping kernel stacks in kseg3).
72
     * entries (e.g. mapping kernel stacks in kseg3).
73
     */
73
     */
74
    cp0_wired_write(TLB_WIRED);
74
    cp0_wired_write(TLB_WIRED);
75
}
75
}
76
 
76
 
77
/** Process TLB Refill Exception
77
/** Process TLB Refill Exception
78
 *
78
 *
79
 * Process TLB Refill Exception.
79
 * Process TLB Refill Exception.
80
 *
80
 *
81
 * @param pstate Interrupted register context.
81
 * @param pstate Interrupted register context.
82
 */
82
 */
83
void tlb_refill(struct exception_regdump *pstate)
83
void tlb_refill(struct exception_regdump *pstate)
84
{
84
{
85
    struct entry_lo lo;
85
    struct entry_lo lo;
86
    __address badvaddr;
86
    __address badvaddr;
87
    pte_t *pte;
87
    pte_t *pte;
88
   
88
   
89
    badvaddr = cp0_badvaddr_read();
89
    badvaddr = cp0_badvaddr_read();
90
   
90
   
91
    spinlock_lock(&VM->lock);      
91
    spinlock_lock(&VM->lock);      
92
    pte = find_mapping_and_check(badvaddr);
92
    pte = find_mapping_and_check(badvaddr);
93
    if (!pte)
93
    if (!pte)
94
        goto fail;
94
        goto fail;
95
 
95
 
96
    /*
96
    /*
97
     * Record access to PTE.
97
     * Record access to PTE.
98
     */
98
     */
99
    pte->a = 1;
99
    pte->a = 1;
100
 
100
 
101
    prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);
101
    prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);
102
 
102
 
103
    /*
103
    /*
104
     * New entry is to be inserted into TLB
104
     * New entry is to be inserted into TLB
105
     */
105
     */
106
    if ((badvaddr/PAGE_SIZE) % 2 == 0) {
106
    if ((badvaddr/PAGE_SIZE) % 2 == 0) {
107
        cp0_entry_lo0_write(*((__u32 *) &lo));
107
        cp0_entry_lo0_write(*((__u32 *) &lo));
108
        cp0_entry_lo1_write(0);
108
        cp0_entry_lo1_write(0);
109
    }
109
    }
110
    else {
110
    else {
111
        cp0_entry_lo0_write(0);
111
        cp0_entry_lo0_write(0);
112
        cp0_entry_lo1_write(*((__u32 *) &lo));
112
        cp0_entry_lo1_write(*((__u32 *) &lo));
113
    }
113
    }
114
    tlbwr();
114
    tlbwr();
115
 
115
 
116
    spinlock_unlock(&VM->lock);
116
    spinlock_unlock(&VM->lock);
117
    return;
117
    return;
118
   
118
   
119
fail:
119
fail:
120
    spinlock_unlock(&VM->lock);
120
    spinlock_unlock(&VM->lock);
121
    tlb_refill_fail(pstate);
121
    tlb_refill_fail(pstate);
122
}
122
}
123
 
123
 
124
/** Process TLB Invalid Exception
124
/** Process TLB Invalid Exception
125
 *
125
 *
126
 * Process TLB Invalid Exception.
126
 * Process TLB Invalid Exception.
127
 *
127
 *
128
 * @param pstate Interrupted register context.
128
 * @param pstate Interrupted register context.
129
 */
129
 */
130
void tlb_invalid(struct exception_regdump *pstate)
130
void tlb_invalid(struct exception_regdump *pstate)
131
{
131
{
132
    struct index index;
132
    struct index index;
133
    __address badvaddr;
133
    __address badvaddr;
134
    struct entry_lo lo;
134
    struct entry_lo lo;
135
    pte_t *pte;
135
    pte_t *pte;
136
 
136
 
137
    badvaddr = cp0_badvaddr_read();
137
    badvaddr = cp0_badvaddr_read();
138
 
138
 
139
    /*
139
    /*
140
     * Locate the faulting entry in TLB.
140
     * Locate the faulting entry in TLB.
141
     */
141
     */
142
    tlbp();
142
    tlbp();
143
    *((__u32 *) &index) = cp0_index_read();
143
    *((__u32 *) &index) = cp0_index_read();
144
   
144
   
145
    spinlock_lock(&VM->lock);  
145
    spinlock_lock(&VM->lock);  
146
   
146
   
147
    /*
147
    /*
148
     * Fail if the entry is not in TLB.
148
     * Fail if the entry is not in TLB.
149
     */
149
     */
150
    if (index.p)
150
    if (index.p)
151
        goto fail;
151
        goto fail;
152
 
152
 
153
    pte = find_mapping_and_check(badvaddr);
153
    pte = find_mapping_and_check(badvaddr);
154
    if (!pte)
154
    if (!pte)
155
        goto fail;
155
        goto fail;
156
 
156
 
157
    /*
157
    /*
158
     * Read the faulting TLB entry.
158
     * Read the faulting TLB entry.
159
     */
159
     */
160
    tlbr();
160
    tlbr();
161
 
161
 
162
    /*
162
    /*
163
     * Record access to PTE.
163
     * Record access to PTE.
164
     */
164
     */
165
    pte->a = 1;
165
    pte->a = 1;
166
 
166
 
167
    prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);
167
    prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);
168
 
168
 
169
    /*
169
    /*
170
     * The entry is to be updated in TLB.
170
     * The entry is to be updated in TLB.
171
     */
171
     */
172
    if ((badvaddr/PAGE_SIZE) % 2 == 0)
172
    if ((badvaddr/PAGE_SIZE) % 2 == 0)
173
        cp0_entry_lo0_write(*((__u32 *) &lo));
173
        cp0_entry_lo0_write(*((__u32 *) &lo));
174
    else
174
    else
175
        cp0_entry_lo1_write(*((__u32 *) &lo));
175
        cp0_entry_lo1_write(*((__u32 *) &lo));
176
    tlbwi();
176
    tlbwi();
177
 
177
 
178
    spinlock_unlock(&VM->lock);
178
    spinlock_unlock(&VM->lock);
179
    return;
179
    return;
180
   
180
   
181
fail:
181
fail:
182
    spinlock_unlock(&VM->lock);
182
    spinlock_unlock(&VM->lock);
183
    tlb_invalid_fail(pstate);
183
    tlb_invalid_fail(pstate);
184
}
184
}
185
 
185
 
186
/** Process TLB Modified Exception
186
/** Process TLB Modified Exception
187
 *
187
 *
188
 * Process TLB Modified Exception.
188
 * Process TLB Modified Exception.
189
 *
189
 *
190
 * @param pstate Interrupted register context.
190
 * @param pstate Interrupted register context.
191
 */
191
 */
192
 
192
 
193
void tlb_modified(struct exception_regdump *pstate)
193
void tlb_modified(struct exception_regdump *pstate)
194
{
194
{
195
    struct index index;
195
    struct index index;
196
    __address badvaddr;
196
    __address badvaddr;
197
    struct entry_lo lo;
197
    struct entry_lo lo;
198
    pte_t *pte;
198
    pte_t *pte;
199
 
199
 
200
    badvaddr = cp0_badvaddr_read();
200
    badvaddr = cp0_badvaddr_read();
201
 
201
 
202
    /*
202
    /*
203
     * Locate the faulting entry in TLB.
203
     * Locate the faulting entry in TLB.
204
     */
204
     */
205
    tlbp();
205
    tlbp();
206
    *((__u32 *) &index) = cp0_index_read();
206
    *((__u32 *) &index) = cp0_index_read();
207
   
207
   
208
    spinlock_lock(&VM->lock);  
208
    spinlock_lock(&VM->lock);  
209
   
209
   
210
    /*
210
    /*
211
     * Fail if the entry is not in TLB.
211
     * Fail if the entry is not in TLB.
212
     */
212
     */
213
    if (index.p)
213
    if (index.p)
214
        goto fail;
214
        goto fail;
215
 
215
 
216
    pte = find_mapping_and_check(badvaddr);
216
    pte = find_mapping_and_check(badvaddr);
217
    if (!pte)
217
    if (!pte)
218
        goto fail;
218
        goto fail;
219
 
219
 
220
    /*
220
    /*
221
     * Fail if the page is not writable.
221
     * Fail if the page is not writable.
222
     */
222
     */
223
    if (!pte->w)
223
    if (!pte->w)
224
        goto fail;
224
        goto fail;
225
 
225
 
226
    /*
226
    /*
227
     * Read the faulting TLB entry.
227
     * Read the faulting TLB entry.
228
     */
228
     */
229
    tlbr();
229
    tlbr();
230
 
230
 
231
    /*
231
    /*
232
     * Record access and write to PTE.
232
     * Record access and write to PTE.
233
     */
233
     */
234
    pte->a = 1;
234
    pte->a = 1;
235
    pte->d = 1;
235
    pte->d = 1;
236
 
236
 
237
    prepare_entry_lo(&lo, pte->g, pte->v, pte->w, pte->c, pte->pfn);
237
    prepare_entry_lo(&lo, pte->g, pte->v, pte->w, pte->c, pte->pfn);
238
 
238
 
239
    /*
239
    /*
240
     * The entry is to be updated in TLB.
240
     * The entry is to be updated in TLB.
241
     */
241
     */
242
    if ((badvaddr/PAGE_SIZE) % 2 == 0)
242
    if ((badvaddr/PAGE_SIZE) % 2 == 0)
243
        cp0_entry_lo0_write(*((__u32 *) &lo));
243
        cp0_entry_lo0_write(*((__u32 *) &lo));
244
    else
244
    else
245
        cp0_entry_lo1_write(*((__u32 *) &lo));
245
        cp0_entry_lo1_write(*((__u32 *) &lo));
246
    tlbwi();
246
    tlbwi();
247
 
247
 
248
    spinlock_unlock(&VM->lock);
248
    spinlock_unlock(&VM->lock);
249
    return;
249
    return;
250
   
250
   
251
fail:
251
fail:
252
    spinlock_unlock(&VM->lock);
252
    spinlock_unlock(&VM->lock);
253
    tlb_modified_fail(pstate);
253
    tlb_modified_fail(pstate);
254
}
254
}
255
 
255
 
256
void tlb_refill_fail(struct exception_regdump *pstate)
256
void tlb_refill_fail(struct exception_regdump *pstate)
257
{
257
{
258
    char *symbol = "";
258
    char *symbol = "";
259
    char *sym2 = "";
259
    char *sym2 = "";
260
 
260
 
261
    char *s = get_symtab_entry(pstate->epc);
261
    char *s = get_symtab_entry(pstate->epc);
262
    if (s)
262
    if (s)
263
        symbol = s;
263
        symbol = s;
264
    s = get_symtab_entry(pstate->ra);
264
    s = get_symtab_entry(pstate->ra);
265
    if (s)
265
    if (s)
266
        sym2 = s;
266
        sym2 = s;
267
    panic("%X: TLB Refill Exception at %X(%s<-%s)\n", cp0_badvaddr_read(), pstate->epc, symbol, sym2);
267
    panic("%X: TLB Refill Exception at %X(%s<-%s)\n", cp0_badvaddr_read(), pstate->epc, symbol, sym2);
268
}
268
}
269
 
269
 
270
 
270
 
271
void tlb_invalid_fail(struct exception_regdump *pstate)
271
void tlb_invalid_fail(struct exception_regdump *pstate)
272
{
272
{
273
    char *symbol = "";
273
    char *symbol = "";
274
 
274
 
275
    char *s = get_symtab_entry(pstate->epc);
275
    char *s = get_symtab_entry(pstate->epc);
276
    if (s)
276
    if (s)
277
        symbol = s;
277
        symbol = s;
278
    panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol);
278
    panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol);
279
}
279
}
280
 
280
 
281
void tlb_modified_fail(struct exception_regdump *pstate)
281
void tlb_modified_fail(struct exception_regdump *pstate)
282
{
282
{
283
    char *symbol = "";
283
    char *symbol = "";
284
 
284
 
285
    char *s = get_symtab_entry(pstate->epc);
285
    char *s = get_symtab_entry(pstate->epc);
286
    if (s)
286
    if (s)
287
        symbol = s;
287
        symbol = s;
288
    panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol);
288
    panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol);
289
}
289
}
290
 
290
 
291
 
291
 
292
void tlb_invalidate(int asid)
292
void tlb_invalidate(int asid)
293
{
293
{
294
    pri_t pri;
294
    pri_t pri;
295
   
295
   
296
    pri = cpu_priority_high();
296
    pri = cpu_priority_high();
297
   
297
   
298
    // TODO
298
    // TODO
299
   
299
   
300
    cpu_priority_restore(pri);
300
    cpu_priority_restore(pri);
301
}
301
}
302
 
302
 
303
/** Try to find PTE for faulting address
303
/** Try to find PTE for faulting address
304
 *
304
 *
305
 * Try to find PTE for faulting address.
305
 * Try to find PTE for faulting address.
306
 * The VM->lock must be held on entry to this function.
306
 * The VM->lock must be held on entry to this function.
307
 *
307
 *
308
 * @param badvaddr Faulting virtual address.
308
 * @param badvaddr Faulting virtual address.
309
 *
309
 *
310
 * @return PTE on success, NULL otherwise.
310
 * @return PTE on success, NULL otherwise.
311
 */
311
 */
312
/** Try to find PTE for faulting address
 *
 * The VM->lock must be held on entry to this function.
 *
 * @param badvaddr Faulting virtual address.
 *
 * @return PTE on success, NULL otherwise.
 */
pte_t *find_mapping_and_check(__address badvaddr)
{
    struct entry_hi hi;
    pte_t *mapping;

    *((__u32 *) &hi) = cp0_entry_hi_read();

    /* Handler cannot succeed if the ASIDs don't match. */
    if (hi.asid != VM->asid)
        return NULL;

    /* Handler cannot succeed if badvaddr has no mapping. */
    mapping = find_mapping(badvaddr, 0);
    if (!mapping)
        return NULL;

    /* Handler cannot succeed if the mapping is marked as invalid. */
    if (!mapping->v)
        return NULL;

    return mapping;
}
340
 
340
 
341
/** Fill in an EntryLo register image
 *
 * @param lo  EntryLo structure to fill in.
 * @param g   Global bit.
 * @param v   Valid bit.
 * @param d   Dirty bit.
 * @param c   Cache attribute field.
 * @param pfn Physical frame number.
 */
void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, int c, __address pfn)
{
    /* Fields are independent; fill from the reserved part upward. */
    lo->zero = 0;
    lo->pfn = pfn;
    lo->c = c;
    lo->d = d;
    lo->v = v;
    lo->g = g;
}
350
 
350