/*
 * Copyright (C) 2003-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <arch/mm/tlb.h>
#include <arch/mm/asid.h>
#include <mm/tlb.h>
#include <mm/page.h>
#include <mm/vm.h>
#include <arch/cp0.h>
#include <panic.h>
#include <arch.h>
#include <symtab.h>
#include <synch/spinlock.h>
#include <print.h>

static void tlb_refill_fail(struct exception_regdump *pstate);
static void tlb_invalid_fail(struct exception_regdump *pstate);
static void tlb_modified_fail(struct exception_regdump *pstate);

static pte_t *find_mapping_and_check(__address badvaddr);
static void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, bool c, __address pfn);

/** Initialize TLB
 *
 * Initialize TLB.
 * Invalidate all entries and mark wired entries.
 */
void tlb_init_arch(void)
{
	int i;

	cp0_pagemask_write(TLB_PAGE_MASK_16K);
	cp0_entry_hi_write(0);
	cp0_entry_lo0_write(0);
	cp0_entry_lo1_write(0);

	/*
	 * Invalidate all entries.
	 */
	for (i = 0; i < TLB_SIZE; i++) {
		cp0_index_write(i);
		tlbwi();
	}

	/*
	 * The kernel is going to make use of some wired
	 * entries (e.g. mapping kernel stacks in kseg3).
	 */
	cp0_wired_write(TLB_WIRED);
}
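
/*
 * Note on the invalidation above: a TLB entry whose EntryLo V (valid)
 * bits are clear never produces a translation, so cycling tlbwi() over
 * all indices with zeroed EntryHi/EntryLo registers effectively empties
 * the TLB. The Wired register written last restricts the random
 * replacement used by tlbwr() to indices >= TLB_WIRED, which is what
 * reserves the first TLB_WIRED slots for fixed kernel mappings.
 */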

/** Process TLB Refill Exception
 *
 * Process TLB Refill Exception.
 *
 * @param pstate Interrupted register context.
 */
void tlb_refill(struct exception_regdump *pstate)
{
	struct entry_lo lo;
	__address badvaddr;
	pte_t *pte;

	badvaddr = cp0_badvaddr_read();

	spinlock_lock(&VM->lock);
	pte = find_mapping_and_check(badvaddr);
	if (!pte)
		goto fail;

	/*
	 * Record access to PTE.
	 */
	pte->a = 1;

	prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);

	/*
	 * New entry is to be inserted into TLB.
	 */
	if ((badvaddr/PAGE_SIZE) % 2 == 0) {
		cp0_entry_lo0_write(*((__u32 *) &lo));
		cp0_entry_lo1_write(0);
	} else {
		cp0_entry_lo0_write(0);
		cp0_entry_lo1_write(*((__u32 *) &lo));
	}
	tlbwr();

	spinlock_unlock(&VM->lock);
	return;

fail:
	spinlock_unlock(&VM->lock);
	tlb_refill_fail(pstate);
}
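
/*
 * Each MIPS TLB entry translates a pair of adjacent virtual pages:
 * EntryLo0 covers the even page of the pair and EntryLo1 the odd one.
 * Hence tlb_refill() fills only the half selected by
 * (badvaddr/PAGE_SIZE) % 2 and writes zero to the other half; with its
 * V bit clear, an access to the untouched neighbour page simply faults
 * again and is resolved by another pass through this handler. A purely
 * illustrative helper (hypothetical, not used anywhere in this file)
 * would make the selection explicit:
 *
 *	static bool is_odd_page(__address va)
 *	{
 *		return (va / PAGE_SIZE) % 2 == 1;
 *	}
 */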

/** Process TLB Invalid Exception
 *
 * Process TLB Invalid Exception.
 *
 * @param pstate Interrupted register context.
 */
void tlb_invalid(struct exception_regdump *pstate)
{
	struct index index;
	__address badvaddr;
	struct entry_lo lo;
	pte_t *pte;

	badvaddr = cp0_badvaddr_read();

	/*
	 * Locate the faulting entry in TLB.
	 */
	tlbp();
	*((__u32 *) &index) = cp0_index_read();

	spinlock_lock(&VM->lock);

	/*
	 * Fail if the entry is not in TLB.
	 */
	if (index.p)
		goto fail;

	pte = find_mapping_and_check(badvaddr);
	if (!pte)
		goto fail;

	/*
	 * Read the faulting TLB entry.
	 */
	tlbr();

	/*
	 * Record access to PTE.
	 */
	pte->a = 1;

	prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);

	/*
	 * The entry is to be updated in TLB.
	 */
	if ((badvaddr/PAGE_SIZE) % 2 == 0)
		cp0_entry_lo0_write(*((__u32 *) &lo));
	else
		cp0_entry_lo1_write(*((__u32 *) &lo));
	tlbwi();

	spinlock_unlock(&VM->lock);
	return;

fail:
	spinlock_unlock(&VM->lock);
	tlb_invalid_fail(pstate);
}
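
/*
 * Probe semantics used above: tlbp() looks the TLB up by the current
 * EntryHi contents (VPN2 and ASID). On a hit, the Index register
 * selects the matching slot for the subsequent tlbr()/tlbwi() pair;
 * on a miss, its P (probe failure) bit is set, which is exactly the
 * index.p test that diverts the handler to the fail path.
 */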

/** Process TLB Modified Exception
 *
 * Process TLB Modified Exception.
 *
 * @param pstate Interrupted register context.
 */
void tlb_modified(struct exception_regdump *pstate)
{
	struct index index;
	__address badvaddr;
	struct entry_lo lo;
	pte_t *pte;

	badvaddr = cp0_badvaddr_read();

	/*
	 * Locate the faulting entry in TLB.
	 */
	tlbp();
	*((__u32 *) &index) = cp0_index_read();

	spinlock_lock(&VM->lock);

	/*
	 * Fail if the entry is not in TLB.
	 */
	if (index.p)
		goto fail;

	pte = find_mapping_and_check(badvaddr);
	if (!pte)
		goto fail;

	/*
	 * Fail if the page is not writable.
	 */
	if (!pte->w)
		goto fail;

	/*
	 * Read the faulting TLB entry.
	 */
	tlbr();

	/*
	 * Record access and write to PTE.
	 */
	pte->a = 1;
	pte->d = 1;

	prepare_entry_lo(&lo, pte->g, pte->v, pte->d, pte->c, pte->pfn);

	/*
	 * The entry is to be updated in TLB.
	 */
	if ((badvaddr/PAGE_SIZE) % 2 == 0)
		cp0_entry_lo0_write(*((__u32 *) &lo));
	else
		cp0_entry_lo1_write(*((__u32 *) &lo));
	tlbwi();

	spinlock_unlock(&VM->lock);
	return;

fail:
	spinlock_unlock(&VM->lock);
	tlb_modified_fail(pstate);
}
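
/*
 * The D bit of a MIPS TLB entry acts as a write permission rather than
 * a true dirty flag: a store through a mapping with D clear raises the
 * TLB Modified exception handled above. Keeping D clear for clean
 * writable pages and setting pte->d here is the usual way to maintain
 * a software dirty bit on MIPS.
 */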

void tlb_refill_fail(struct exception_regdump *pstate)
{
	char *symbol = "";
	char *sym2 = "";

	char *s = get_symtab_entry(pstate->epc);
	if (s)
		symbol = s;
	s = get_symtab_entry(pstate->ra);
	if (s)
		sym2 = s;
	panic("%X: TLB Refill Exception at %X(%s<-%s)\n", cp0_badvaddr_read(), pstate->epc, symbol, sym2);
}

void tlb_invalid_fail(struct exception_regdump *pstate)
{
	char *symbol = "";

	char *s = get_symtab_entry(pstate->epc);
	if (s)
		symbol = s;
	panic("%X: TLB Invalid Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol);
}

void tlb_modified_fail(struct exception_regdump *pstate)
{
	char *symbol = "";

	char *s = get_symtab_entry(pstate->epc);
	if (s)
		symbol = s;
	panic("%X: TLB Modified Exception at %X(%s)\n", cp0_badvaddr_read(), pstate->epc, symbol);
}

void tlb_invalidate(int asid)
{
	pri_t pri;

	pri = cpu_priority_high();

	// TODO

	cpu_priority_restore(pri);
}

/** Try to find PTE for faulting address
 *
 * Try to find PTE for faulting address.
 * The VM->lock must be held on entry to this function.
 *
 * @param badvaddr Faulting virtual address.
 *
 * @return PTE on success, NULL otherwise.
 */
pte_t *find_mapping_and_check(__address badvaddr)
{
	struct entry_hi hi;
	pte_t *pte;

	*((__u32 *) &hi) = cp0_entry_hi_read();

	/*
	 * Handler cannot succeed if the ASIDs don't match.
	 */
	if (hi.asid != VM->asid)
		return NULL;

	/*
	 * Handler cannot succeed if badvaddr has no mapping.
	 */
	pte = find_mapping(badvaddr, 0);
	if (!pte)
		return NULL;

	/*
	 * Handler cannot succeed if the mapping is marked as invalid.
	 */
	if (!pte->v)
		return NULL;

	return pte;
}
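
/*
 * The ASID test above compares the ASID under which the faulting
 * access was made (still present in EntryHi) with the ASID of the
 * current address space. If they differ, the fault presumably raced
 * with an address space switch, and no PTE of the current VM may be
 * legitimately installed for it.
 */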

/** Prepare EntryLo register content
 *
 * Prepare EntryLo register content
 * from the supplied mapping attributes.
 *
 * @param lo EntryLo structure to fill in.
 * @param g Global bit.
 * @param v Valid bit.
 * @param d Dirty bit.
 * @param c Cache coherency attribute.
 * @param pfn Physical frame number.
 */
void prepare_entry_lo(struct entry_lo *lo, bool g, bool v, bool d, bool c, __address pfn)
{
	lo->g = g;
	lo->v = v;
	lo->d = d;
	lo->c = c;
	lo->pfn = pfn;
	lo->zero = 0;
}
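
/*
 * For reference: prepare_entry_lo() assumes that struct entry_lo
 * (declared in arch/mm/tlb.h) mirrors the 32-bit EntryLo register.
 * The layout sketched below is only an illustration based on the
 * R4000 register format, not the authoritative declaration:
 *
 *	struct entry_lo {
 *		unsigned g : 1;		// global mapping (ASID ignored)
 *		unsigned v : 1;		// valid
 *		unsigned d : 1;		// dirty (acts as write-enable)
 *		unsigned c : 3;		// cache coherency attribute
 *		unsigned pfn : 24;	// physical frame number
 *		unsigned zero : 2;	// reserved, kept zero
 *	} __attribute__ ((packed));
 */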