/*
 * Copyright (C) 2006 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup ia64mm
 * @{
 */
/** @file
 */

/*
 * TLB management.
 */

#include <mm/tlb.h>
#include <mm/asid.h>
#include <mm/page.h>
#include <mm/as.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/mm/vhpt.h>
#include <arch/barrier.h>
#include <arch/interrupt.h>
#include <arch/pal/pal.h>
#include <arch/asm.h>
#include <typedefs.h>
#include <panic.h>
#include <print.h>
#include <arch.h>
#include <interrupt.h>

/** Invalidate all TLB entries. */
void tlb_invalidate_all(void)
{
	ipl_t ipl;
	__address adr;
	__u32 count1, count2, stride1, stride2;
	int i, j;

	adr = PAL_PTCE_INFO_BASE();
	count1 = PAL_PTCE_INFO_COUNT1();
	count2 = PAL_PTCE_INFO_COUNT2();
	stride1 = PAL_PTCE_INFO_STRIDE1();
	stride2 = PAL_PTCE_INFO_STRIDE2();

	ipl = interrupts_disable();

	for (i = 0; i < count1; i++) {
		for (j = 0; j < count2; j++) {
			__asm__ volatile (
				"ptc.e %0 ;;"
				:
				: "r" (adr)
			);
			adr += stride2;
		}
		adr += stride1;
	}

	interrupts_restore(ipl);

	srlz_d();
	srlz_i();
#ifdef CONFIG_VHPT
	vhpt_invalidate_all();
#endif
}
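
/*
 * Note on the loop above: the PAL_PTCE_INFO_* calls describe the purge
 * domain recommended by PAL. ptc.e is issued count1 * count2 times; the
 * address advances by stride2 after every inner iteration and by an
 * additional stride1 after each completed inner loop. For instance, with
 * count1 == 2, count2 == 4, stride1 == 0x2000 and stride2 == 0x1000
 * (hypothetical values), eight ptc.e operations are issued starting at
 * PAL_PTCE_INFO_BASE().
 */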

/** Invalidate entries belonging to an address space.
 *
 * The current implementation simply invalidates the entire TLB.
 *
 * @param asid Address space identifier.
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_invalidate_all();
}

/** Invalidate entries belonging to an address space within a range of pages.
 *
 * The range is purged with ptc.l in blocks whose size is a power of four
 * pages, so a somewhat larger, suitably aligned area may be flushed than
 * requested.
 *
 * @param asid Address space identifier.
 * @param page Address of the first page whose entry is to be invalidated.
 * @param cnt Number of entries to invalidate.
 */
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt)
{
	region_register rr;
	bool restore_rr = false;
	int b = 0;
	int c = cnt;
	__u64 ps;
	__address va;

	va = page;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	while (c >>= 1)
		b++;
	b >>= 1;

	switch (b) {
		case 0: /* cnt 1-3 */
			ps = PAGE_WIDTH;
			break;
		case 1: /* cnt 4-15 */
			/* cnt = ((cnt - 1) / 4) + 1; */
			ps = PAGE_WIDTH + 2;
			va &= ~((1 << ps) - 1);
			break;
		case 2: /* cnt 16-63 */
			/* cnt = ((cnt - 1) / 16) + 1; */
			ps = PAGE_WIDTH + 4;
			va &= ~((1 << ps) - 1);
			break;
		case 3: /* cnt 64-255 */
			/* cnt = ((cnt - 1) / 64) + 1; */
			ps = PAGE_WIDTH + 6;
			va &= ~((1 << ps) - 1);
			break;
		case 4: /* cnt 256-1023 */
			/* cnt = ((cnt - 1) / 256) + 1; */
			ps = PAGE_WIDTH + 8;
			va &= ~((1 << ps) - 1);
			break;
		case 5: /* cnt 1024-4095 */
			/* cnt = ((cnt - 1) / 1024) + 1; */
			ps = PAGE_WIDTH + 10;
			va &= ~((1 << ps) - 1);
			break;
		case 6: /* cnt 4096-16383 */
			/* cnt = ((cnt - 1) / 4096) + 1; */
			ps = PAGE_WIDTH + 12;
			va &= ~((1 << ps) - 1);
			break;
		case 7: /* cnt 16384-65535 */
		case 8: /* cnt 65536-(256K-1) */
			/* cnt = ((cnt - 1) / 16384) + 1; */
			ps = PAGE_WIDTH + 14;
			va &= ~((1 << ps) - 1);
			break;
		default:
			/* cnt = ((cnt - 1) / (16384 * 16)) + 1; */
			ps = PAGE_WIDTH + 18;
			va &= ~((1 << ps) - 1);
			break;
	}
	/* cnt += (page != va); */
	for (; va < (page + cnt * PAGE_SIZE); va += (1 << ps)) {
		__asm__ volatile (
			"ptc.l %0,%1;;"
			:
			: "r" (va), "r" (ps << 2)
		);
	}
	srlz_d();
	srlz_i();

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
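
/*
 * Worked example for the rounding in tlb_invalidate_pages() above, assuming
 * the usual 16 KiB base page (PAGE_WIDTH == 14): for cnt == 20, the while
 * loop yields b == 4, which b >>= 1 turns into 2, so ps == PAGE_WIDTH + 4 ==
 * 18. The purge then proceeds with ptc.l in 256 KiB blocks, with va first
 * aligned down to a 256 KiB boundary, and may therefore flush somewhat more
 * than the 20 requested pages.
 */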

/** Insert data into data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, true);
}

/** Insert data into instruction translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 */
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry)
{
	tc_mapping_insert(va, asid, entry, false);
}

/** Insert data into instruction or data translation cache.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
 */
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7 = %4,r0;;\n"	/* decide between itc and dtc */
		"(p6) itc.i %3;;\n"
		"(p7) itc.d %3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}
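
/*
 * Illustrative use of the insertion functions above (hypothetical page,
 * frame and as variables; mirrors what dtc_pte_copy() below does for real
 * PTEs):
 *
 *	tlb_entry_t e;
 *
 *	e.word[0] = 0;
 *	e.word[1] = 0;
 *	e.p = true;
 *	e.ma = MA_WRITEBACK;
 *	e.a = true;
 *	e.d = true;
 *	e.pl = PL_USER;
 *	e.ar = AR_READ | AR_WRITE;
 *	e.ppn = frame >> PPN_SHIFT;
 *	e.ps = PAGE_WIDTH;
 *	dtc_mapping_insert(page, as->asid, e);
 */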

/** Insert data into instruction translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, false, tr);
}

/** Insert data into data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param tr Translation register.
 */
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr)
{
	tr_mapping_insert(va, asid, entry, true, tr);
}

/** Insert data into instruction or data translation register.
 *
 * @param va Virtual page address.
 * @param asid Address space identifier.
 * @param entry The rest of TLB entry as required by TLB insertion format.
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
 * @param tr Translation register.
 */
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
{
	region_register rr;
	bool restore_rr = false;

	rr.word = rr_read(VA2VRN(va));
	if ((restore_rr = (rr.map.rid != ASID2RID(asid, VA2VRN(va))))) {
		/*
		 * The selected region register does not contain required RID.
		 * Save the old content of the register and replace the RID.
		 */
		region_register rr0;

		rr0 = rr;
		rr0.map.rid = ASID2RID(asid, VA2VRN(va));
		rr_write(VA2VRN(va), rr0.word);
		srlz_d();
		srlz_i();
	}

	__asm__ volatile (
		"mov r8=psr;;\n"
		"rsm %0;;\n"			/* PSR_IC_MASK */
		"srlz.d;;\n"
		"srlz.i;;\n"
		"mov cr.ifa=%1\n"		/* va */
		"mov cr.itir=%2;;\n"		/* entry.word[1] */
		"cmp.eq p6,p7=%5,r0;;\n"	/* decide between itr and dtr */
		"(p6) itr.i itr[%4]=%3;;\n"
		"(p7) itr.d dtr[%4]=%3;;\n"
		"mov psr.l=r8;;\n"
		"srlz.d;;\n"
		:
		: "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
		: "p6", "p7", "r8"
	);

	if (restore_rr) {
		rr_write(VA2VRN(va), rr.word);
		srlz_d();
		srlz_i();
	}
}

/** Insert data into DTLB.
 *
 * @param page Virtual page address including VRN bits.
 * @param frame Physical frame address.
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
 * @param tr Translation register if dtr is true, ignored otherwise.
 */
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = true;			/* present */
	entry.ma = MA_WRITEBACK;
	entry.a = true;			/* already accessed */
	entry.d = true;			/* already dirty */
	entry.pl = PL_KERNEL;
	entry.ar = AR_READ | AR_WRITE;
	entry.ppn = frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	if (dtr)
		dtr_mapping_insert(page, ASID_KERNEL, entry, tr);
	else
		dtc_mapping_insert(page, ASID_KERNEL, entry);
}
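
/*
 * Usage sketch (hypothetical frame value; PA2KA() assumed to be the inverse
 * of the KA2PA() macro used later in this file): map one kernel page, either
 * pinned in data translation register 1 or inserted into the DTC only.
 *
 *	dtlb_kernel_mapping_insert(PA2KA(frame), frame, true, 1);
 *	dtlb_kernel_mapping_insert(PA2KA(frame), frame, false, 0);
 */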

/** Purge kernel entries from DTR.
 *
 * Purge DTR entries used by the kernel.
 *
 * @param page Virtual page address including VRN bits.
 * @param width Width of the purge in bits.
 */
void dtr_purge(__address page, count_t width)
{
	__asm__ volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
}
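
/*
 * The shift by two above places the purge size into bits 7:2 of the second
 * ptr.d operand, the same encoding the ptc.l loop in tlb_invalidate_pages()
 * uses for its page size argument.
 */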

/** Copy content of PTE into data translation cache.
 *
 * @param t PTE.
 */
void dtc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.d = t->d;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->w ? AR_WRITE : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	dtc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Copy content of PTE into instruction translation cache.
 *
 * @param t PTE.
 */
void itc_pte_copy(pte_t *t)
{
	tlb_entry_t entry;

	entry.word[0] = 0;
	entry.word[1] = 0;

	ASSERT(t->x);

	entry.p = t->p;
	entry.ma = t->c ? MA_WRITEBACK : MA_UNCACHEABLE;
	entry.a = t->a;
	entry.pl = t->k ? PL_KERNEL : PL_USER;
	entry.ar = t->x ? (AR_EXECUTE | AR_READ) : AR_READ;
	entry.ppn = t->frame >> PPN_SHIFT;
	entry.ps = PAGE_WIDTH;

	itc_mapping_insert(t->page, t->as->asid, entry);
#ifdef CONFIG_VHPT
	vhpt_mapping_insert(t->page, t->as->asid, entry);
#endif
}

/** Instruction TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into the instruction translation cache.
		 */
		itc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
		}
	}
}

/** Data TLB fault handler for faults with VHPT turned off.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void alternate_data_tlb_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;
	if (RID2ASID(rid) == ASID_KERNEL) {
		if (VA2VRN(va) == VRN_KERNEL) {
			/*
			 * Provide KA2PA(identity) mapping for the faulting
			 * piece of kernel address space.
			 */
			dtlb_kernel_mapping_insert(va, KA2PA(va), false, 0);
			return;
		}
	}

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	if (t) {
		/*
		 * The mapping was found in the software page hash table.
		 * Insert it into the data translation cache.
		 */
		dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		/*
		 * Forward the page fault to the address space page fault handler.
		 */
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
		}
	}
}
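
/*
 * Note on the kernel branch above: addresses in VRN_KERNEL are mapped 1:1
 * onto physical memory, so the handler can construct the translation on the
 * fly with KA2PA() instead of consulting the page hash table.
 */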

/** Data nested TLB fault handler.
 *
 * This fault should not occur.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_nested_tlb_fault(__u64 vector, istate_t *istate)
{
	panic("%s\n", __FUNCTION__);
}

/** Data Dirty bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_dirty_bit_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t && t->p);
	if (t && t->p && t->w) {
		/*
		 * Update the Dirty bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->d = true;
		dtc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
			t->d = true;
			dtc_pte_copy(t);
		}
	}
	page_table_unlock(AS, true);
}

/** Instruction access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void instruction_access_bit_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t && t->p);
	if (t && t->p && t->x) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into ITC.
		 */
		t->a = true;
		itc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
			t->a = true;
			itc_pte_copy(t);
		}
	}
	page_table_unlock(AS, true);
}

/** Data access bit fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void data_access_bit_fault(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t && t->p);
	if (t && t->p) {
		/*
		 * Update the Accessed bit in page tables and reinsert
		 * the mapping into DTC.
		 */
		t->a = true;
		dtc_pte_copy(t);
	} else {
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d, iip=%p\n", __FUNCTION__, va, rid, istate->cr_iip);
			t->a = true;
			dtc_pte_copy(t);
		}
	}
	page_table_unlock(AS, true);
}

/** Page not present fault handler.
 *
 * @param vector Interruption vector.
 * @param istate Structure with saved interruption state.
 */
void page_not_present(__u64 vector, istate_t *istate)
{
	region_register rr;
	rid_t rid;
	__address va;
	pte_t *t;

	va = istate->cr_ifa;	/* faulting address */
	rr.word = rr_read(VA2VRN(va));
	rid = rr.map.rid;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
	ASSERT(t);

	if (t->p) {
		/*
		 * If the Present bit is set in the page hash table, just copy
		 * the mapping into the ITC or DTC.
		 */
		if (t->x)
			itc_pte_copy(t);
		else
			dtc_pte_copy(t);
		page_table_unlock(AS, true);
	} else {
		page_table_unlock(AS, true);
		if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
			fault_if_from_uspace(istate, "Page fault at %p", va);
			panic("%s: va=%p, rid=%d\n", __FUNCTION__, va, rid);
		}
	}
}

/** @}
 */