Subversion Repositories HelenOS

Rev

Rev 1411 | Go to most recent revision | Details | Compare with Previous | Last modification | View Log | RSS feed

Rev Author Line No. Line
/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
28
 
/*
 * TLB management.
 */
32
 
33
#include <mm/tlb.h>
901 jermar 34
#include <mm/asid.h>
902 jermar 35
#include <mm/page.h>
36
#include <mm/as.h>
818 vana 37
#include <arch/mm/tlb.h>
901 jermar 38
#include <arch/mm/page.h>
1210 vana 39
#include <arch/mm/vhpt.h>
819 vana 40
#include <arch/barrier.h>
900 jermar 41
#include <arch/interrupt.h>
928 vana 42
#include <arch/pal/pal.h>
43
#include <arch/asm.h>
899 jermar 44
#include <typedefs.h>
900 jermar 45
#include <panic.h>
993 jermar 46
#include <print.h>
902 jermar 47
#include <arch.h>
1621 vana 48
#include <interrupt.h>
740 jermar 49
 
756 jermar 50
/** Invalidate all TLB entries. */
740 jermar 51
void tlb_invalidate_all(void)
52
{
993 jermar 53
		ipl_t ipl;
928 vana 54
		__address adr;
993 jermar 55
		__u32 count1, count2, stride1, stride2;
928 vana 56
 
57
		int i,j;
58
 
993 jermar 59
		adr = PAL_PTCE_INFO_BASE();
60
		count1 = PAL_PTCE_INFO_COUNT1();
61
		count2 = PAL_PTCE_INFO_COUNT2();
62
		stride1 = PAL_PTCE_INFO_STRIDE1();
63
		stride2 = PAL_PTCE_INFO_STRIDE2();
928 vana 64
 
993 jermar 65
		ipl = interrupts_disable();
928 vana 66
 
993 jermar 67
		for(i = 0; i < count1; i++) {
68
			for(j = 0; j < count2; j++) {
69
				__asm__ volatile (
70
					"ptc.e %0 ;;"
928 vana 71
					:
993 jermar 72
					: "r" (adr)
928 vana 73
				);
993 jermar 74
				adr += stride2;
928 vana 75
			}
993 jermar 76
			adr += stride1;
928 vana 77
		}
78
 
993 jermar 79
		interrupts_restore(ipl);
928 vana 80
 
81
		srlz_d();
82
		srlz_i();
1210 vana 83
#ifdef CONFIG_VHPT
84
		vhpt_invalidate_all();
85
#endif	
740 jermar 86
}
87
 
88
/** Invalidate entries belonging to an address space.
 *
 * NOTE: this implementation is conservative: it ignores @asid and
 * purges the whole TLB via tlb_invalidate_all() instead of flushing
 * only the entries tagged with the given ASID.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_invalidate_all();
}
818 vana 96
 
935 vana 97
 
947 vana 98
/** Invalidate TLB entries mapping a range of pages of one address space.
 *
 * Temporarily installs the RID derived from @asid into the region
 * register selected by @page, purges the range with ptc.l using a
 * purge size chosen from cnt, and restores the region register.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page in the range.
 * @param cnt Number of pages to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
	region_register rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;
	__u64 ps;

	__address va;
	va = page;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/* b ends up as roughly log4(cnt). */
	while (c >>= 1)
		b++;
	b >>= 1;

	/*
	 * Select the purge page size and align va to it.
	 * All shifts and masks below are carried out in 64 bits:
	 * ps can reach PAGE_WIDTH + 18, for which a 32-bit
	 * "1 << ps" would be undefined behavior and the resulting
	 * int-width mask would truncate the upper bits of va.
	 */
	switch (b) {
		case 0: /* cnt 1 - 3 */
			ps = PAGE_WIDTH;
			break;
		case 1: /* cnt 4 - 15 */
			ps = PAGE_WIDTH + 2;
			va &= ~(((__u64) 1 << ps) - 1);
			break;
		case 2: /* cnt 16 - 63 */
			ps = PAGE_WIDTH + 4;
			va &= ~(((__u64) 1 << ps) - 1);
			break;
		case 3: /* cnt 64 - 255 */
			ps = PAGE_WIDTH + 6;
			va &= ~(((__u64) 1 << ps) - 1);
			break;
		case 4: /* cnt 256 - 1023 */
			ps = PAGE_WIDTH + 8;
			va &= ~(((__u64) 1 << ps) - 1);
			break;
		case 5: /* cnt 1024 - 4095 */
			ps = PAGE_WIDTH + 10;
			va &= ~(((__u64) 1 << ps) - 1);
			break;
		case 6: /* cnt 4096 - 16383 */
			ps = PAGE_WIDTH + 12;
			va &= ~(((__u64) 1 << ps) - 1);
			break;
		case 7: /* cnt 16384 - 65535 */
		case 8: /* cnt 65536 - (256K - 1) */
			ps = PAGE_WIDTH + 14;
			va &= ~(((__u64) 1 << ps) - 1);
			break;
		default:
			ps = PAGE_WIDTH + 18;
			va &= ~(((__u64) 1 << ps) - 1);
			break;
	}
	/* Purge the range; ptc.l takes the page size encoded in bits 7:2. */
	for (; va < (page + cnt * (PAGE_SIZE)); va += ((__u64) 1 << ps)) {
		__asm__ volatile (
			"ptc.l %0,%1;;"
			:
			: "r" (va), "r" (ps << 2)
		);
	}
	srlz_d();
	srlz_i();

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
191
 
192
 
899 jermar 193
/** Insert data into data translation cache.
 *
 * Thin wrapper around tc_mapping_insert() with dtc == true.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}
818 vana 203
 
899 jermar 204
/** Insert data into instruction translation cache.
 *
 * Thin wrapper around tc_mapping_insert() with dtc == false.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}
818 vana 214
 
899 jermar 215
/** Insert data into instruction or data translation cache.
 *
 * If the region register covering va does not already carry the RID
 * derived from asid, it is temporarily rewritten and restored at the
 * end. The actual insertion is done with interruption collection
 * (PSR.ic) disabled, as required by the architecture for itc.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Save PSR in r8, clear PSR.ic, set up cr.ifa/cr.itir and issue
	 * itc.i or itc.d depending on the dtc flag, then restore PSR.
	 * NOTE(review): assumes no interruption clobbers r8 while PSR.ic
	 * is down -- confirm against the interruption entry path.
	 */
	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"   			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */ 
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
818 vana 265
 
899 jermar 266
/** Insert data into instruction translation register.
 *
 * Thin wrapper around tr_mapping_insert() with dtr == false.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}
818 vana 277
 
899 jermar 278
/** Insert data into data translation register.
 *
 * Thin wrapper around tr_mapping_insert() with dtr == true.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}
289
 
899 jermar 290
/** Insert data into instruction or data translation register.
 *
 * Same structure as tc_mapping_insert(), but pins the mapping in a
 * translation register (itr/dtr) instead of the translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	/*
	 * Save PSR in r8, clear PSR.ic, set up cr.ifa/cr.itir and issue
	 * itr.i or itr.d into slot tr depending on the dtr flag, then
	 * restore PSR.
	 */
	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"        	/* va */		 
		"mov cr.itir=%2;;\n"		/* entry.word[1] */ 
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
818 vana 341
 
901 jermar 342
/** Insert data into DTLB.
343
 *
344
 * @param va Virtual page address.
345
 * @param asid Address space identifier.
346
 * @param entry The rest of TLB entry as required by TLB insertion format.
347
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
348
 * @param tr Translation register if dtr is true, ignored otherwise.
349
 */
902 jermar 350
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
901 jermar 351
{
352
	tlb_entry_t entry;
353
 
354
	entry.word[0] = 0;
355
	entry.word[1] = 0;
356
 
357
	entry.p = true;			/* present */
358
	entry.ma = MA_WRITEBACK;
359
	entry.a = true;			/* already accessed */
360
	entry.d = true;			/* already dirty */
361
	entry.pl = PL_KERNEL;
362
	entry.ar = AR_READ | AR_WRITE;
363
	entry.ppn = frame >> PPN_SHIFT;
364
	entry.ps = PAGE_WIDTH;
365
 
366
	if (dtr)
367
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
368
	else
369
		dtc_mapping_insert(page, ASID_KERNEL, entry);
370
}
371
 
902 jermar 372
/** Copy content of PTE into data translation cache.
373
 *
374
 * @param t PTE.
375
 */
376
void dtc_pte_copy(pte_t *t)
377
{
378
	tlb_entry_t entry;
379
 
380
	entry.word[0] = 0;
381
	entry.word[1] = 0;
382
 
383
	entry.p = t->p;
384
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
385
	entry.a = t->a;
386
	entry.d = t->d;
387
	entry.pl = t->k ? PL_KERNEL : PL_USER;
388
	entry.ar = t->w ? AR_WRITE : AR_READ;
389
	entry.ppn = t->frame >> PPN_SHIFT;
390
	entry.ps = PAGE_WIDTH;
391
 
392
	dtc_mapping_insert(t->page, t->as->asid, entry);
1210 vana 393
#ifdef CONFIG_VHPT
394
	vhpt_mapping_insert(t->page, t->as->asid, entry);
395
#endif	
902 jermar 396
}
397
 
398
/** Copy content of PTE into instruction translation cache.
399
 *
400
 * @param t PTE.
401
 */
402
void itc_pte_copy(pte_t *t)
403
{
404
	tlb_entry_t entry;
405
 
406
	entry.word[0] = 0;
407
	entry.word[1] = 0;
408
 
409
	ASSERT(t->x);
410
 
411
	entry.p = t->p;
412
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
413
	entry.a = t->a;
414
	entry.pl = t->k ? PL_KERNEL : PL_USER;
415
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
416
	entry.ppn = t->frame >> PPN_SHIFT;
417
	entry.ps = PAGE_WIDTH;
418
 
419
	itc_mapping_insert(t->page, t->as->asid, entry);
1210 vana 420
#ifdef CONFIG_VHPT
421
	vhpt_mapping_insert(t->page, t->as->asid, entry);
422
#endif	
902 jermar 423
}
424
 
425
/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into instruction translation cache.
		 */
		itc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %P",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
		}
	}
}
818 vana 461
 
902 jermar 462
/** Data TLB fault handler for faults with VHPT turned off.
 *
 * Kernel-region faults get an on-the-fly identity mapping; other
 * faults are resolved from the software page hash table or forwarded
 * to the generic page fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for faulting piece of
			 * kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in software page hash table.
		 * Insert it into data translation cache.
		 */
		dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate,"Page fault at %P",va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
		}
	}
}
508
 
902 jermar 509
/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
{
	panic("%s\n", __FUNCTION__);
}
818 vana 520
 
902 jermar 521
/** Data Dirty bit fault handler.
522
 *
523
 * @param vector Interruption vector.
958 jermar 524
 * @param istate Structure with saved interruption state.
902 jermar 525
 */
958 jermar 526
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
819 vana 527
{
1411 jermar 528
	region_register rr;
529
	rid_t rid;
530
	__address va;
902 jermar 531
	pte_t *t;
1411 jermar 532
 
533
	va = istate->cr_ifa;	/* faulting address */
534
	rr.word = rr_read(VA2VRN(va));
535
	rid = rr.map.rid;
902 jermar 536
 
1044 jermar 537
	page_table_lock(AS, true);
1411 jermar 538
	t = page_mapping_find(AS, va);
902 jermar 539
	ASSERT(t && t->p);
1411 jermar 540
	if (t && t->p && t->w) {
902 jermar 541
		/*
542
		 * Update the Dirty bit in page tables and reinsert
543
		 * the mapping into DTC.
544
		 */
545
		t->d = true;
546
		dtc_pte_copy(t);
1411 jermar 547
	} else {
548
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
1621 vana 549
			fault_if_from_uspace(istate,"Page fault at %P",va);
1411 jermar 550
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
551
			t->d = true;
552
			dtc_pte_copy(t);
553
		}
902 jermar 554
	}
1044 jermar 555
	page_table_unlock(AS, true);
899 jermar 556
}
819 vana 557
 
902 jermar 558
/** Instruction access bit fault handler.
559
 *
560
 * @param vector Interruption vector.
958 jermar 561
 * @param istate Structure with saved interruption state.
902 jermar 562
 */
958 jermar 563
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
899 jermar 564
{
1411 jermar 565
	region_register rr;
566
	rid_t rid;
567
	__address va;
568
	pte_t *t;	
902 jermar 569
 
1411 jermar 570
	va = istate->cr_ifa;	/* faulting address */
571
	rr.word = rr_read(VA2VRN(va));
572
	rid = rr.map.rid;
573
 
1044 jermar 574
	page_table_lock(AS, true);
1411 jermar 575
	t = page_mapping_find(AS, va);
902 jermar 576
	ASSERT(t && t->p);
1411 jermar 577
	if (t && t->p && t->x) {
902 jermar 578
		/*
579
		 * Update the Accessed bit in page tables and reinsert
580
		 * the mapping into ITC.
581
		 */
582
		t->a = true;
583
		itc_pte_copy(t);
1411 jermar 584
	} else {
585
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
1621 vana 586
			fault_if_from_uspace(istate,"Page fault at %P",va);
1411 jermar 587
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
588
			t->a = true;
589
			itc_pte_copy(t);
590
		}
902 jermar 591
	}
1044 jermar 592
	page_table_unlock(AS, true);
899 jermar 593
}
819 vana 594
 
902 jermar 595
/** Data access bit fault handler.
596
 *
597
 * @param vector Interruption vector.
958 jermar 598
 * @param istate Structure with saved interruption state.
902 jermar 599
 */
958 jermar 600
void data_access_bit_fault(__u64 vector, istate_t *istate)
899 jermar 601
{
1411 jermar 602
	region_register rr;
603
	rid_t rid;
604
	__address va;
902 jermar 605
	pte_t *t;
606
 
1411 jermar 607
	va = istate->cr_ifa;	/* faulting address */
608
	rr.word = rr_read(VA2VRN(va));
609
	rid = rr.map.rid;
610
 
1044 jermar 611
	page_table_lock(AS, true);
1411 jermar 612
	t = page_mapping_find(AS, va);
902 jermar 613
	ASSERT(t && t->p);
614
	if (t && t->p) {
615
		/*
616
		 * Update the Accessed bit in page tables and reinsert
617
		 * the mapping into DTC.
618
		 */
619
		t->a = true;
620
		dtc_pte_copy(t);
1411 jermar 621
	} else {
622
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
1621 vana 623
			fault_if_from_uspace(istate,"Page fault at %P",va);
1411 jermar 624
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
625
			t->a = true;
626
			itc_pte_copy(t);
627
		}
902 jermar 628
	}
1044 jermar 629
	page_table_unlock(AS, true);
819 vana 630
}
631
 
902 jermar 632
/** Page not present fault handler.
633
 *
634
 * @param vector Interruption vector.
958 jermar 635
 * @param istate Structure with saved interruption state.
902 jermar 636
 */
958 jermar 637
void page_not_present(__u64 vector, istate_t *istate)
819 vana 638
{
902 jermar 639
	region_register rr;
1411 jermar 640
	rid_t rid;
902 jermar 641
	__address va;
642
	pte_t *t;
643
 
958 jermar 644
	va = istate->cr_ifa;	/* faulting address */
1411 jermar 645
	rr.word = rr_read(VA2VRN(va));
646
	rid = rr.map.rid;
647
 
1044 jermar 648
	page_table_lock(AS, true);
902 jermar 649
	t = page_mapping_find(AS, va);
650
	ASSERT(t);
651
 
652
	if (t->p) {
653
		/*
654
		 * If the Present bit is set in page hash table, just copy it
655
		 * and update ITC/DTC.
656
		 */
657
		if (t->x)
658
			itc_pte_copy(t);
659
		else
660
			dtc_pte_copy(t);
1044 jermar 661
		page_table_unlock(AS, true);
902 jermar 662
	} else {
1044 jermar 663
		page_table_unlock(AS, true);
1411 jermar 664
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
1621 vana 665
			fault_if_from_uspace(istate,"Page fault at %P",va);
1411 jermar 666
			panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
902 jermar 667
		}
668
	}
819 vana 669
}