Subversion Repositories HelenOS


Changes from Rev 3763 to Rev 3766 (unified diff):
@@ -90,11 +90,11 @@
 #endif
 }
 
 /** Invalidate entries belonging to an address space.
  *
- * @param asid Address space identifier.
+ * @param asid      Address space identifier.
  */
 void tlb_invalidate_asid(asid_t asid)
 {
     tlb_invalidate_all();
 }
@@ -129,54 +129,49 @@
         b++;
     b >>= 1;
     uint64_t ps;
 
     switch (b) {
-    case 0: /*cnt 1-3*/
+    case 0: /* cnt 1 - 3 */
         ps = PAGE_WIDTH;
         break;
-    case 1: /*cnt 4-15*/
-        ps = PAGE_WIDTH+2;
-        va &= ~((1<<ps)-1);
+    case 1: /* cnt 4 - 15 */
+        ps = PAGE_WIDTH + 2;
+        va &= ~((1 << ps) - 1);
         break;
-    case 2: /*cnt 16-63*/
-        ps = PAGE_WIDTH+4;
-        va &= ~((1<<ps)-1);
+    case 2: /* cnt 16 - 63 */
+        ps = PAGE_WIDTH + 4;
+        va &= ~((1 << ps) - 1);
         break;
-    case 3: /*cnt 64-255*/
-        ps = PAGE_WIDTH+6;
-        va &= ~((1<<ps)-1);
+    case 3: /* cnt 64 - 255 */
+        ps = PAGE_WIDTH + 6;
+        va &= ~((1 << ps) - 1);
         break;
-    case 4: /*cnt 256-1023*/
-        ps = PAGE_WIDTH+8;
-        va &= ~((1<<ps)-1);
+    case 4: /* cnt 256 - 1023 */
+        ps = PAGE_WIDTH + 8;
+        va &= ~((1 << ps) - 1);
         break;
-    case 5: /*cnt 1024-4095*/
-        ps = PAGE_WIDTH+10;
-        va &= ~((1<<ps)-1);
+    case 5: /* cnt 1024 - 4095 */
+        ps = PAGE_WIDTH + 10;
+        va &= ~((1 << ps) - 1);
         break;
-    case 6: /*cnt 4096-16383*/
-        ps = PAGE_WIDTH+12;
-        va &= ~((1<<ps)-1);
+    case 6: /* cnt 4096 - 16383 */
+        ps = PAGE_WIDTH + 12;
+        va &= ~((1 << ps) - 1);
         break;
-    case 7: /*cnt 16384-65535*/
-    case 8: /*cnt 65536-(256K-1)*/
-        ps = PAGE_WIDTH+14;
-        va &= ~((1<<ps)-1);
+    case 7: /* cnt 16384 - 65535 */
+    case 8: /* cnt 65536 - (256K - 1) */
+        ps = PAGE_WIDTH + 14;
+        va &= ~((1 << ps) - 1);
         break;
     default:
-        ps=PAGE_WIDTH+18;
-        va&=~((1<<ps)-1);
+        ps = PAGE_WIDTH + 18;
+        va &= ~((1 << ps) - 1);
         break;
     }
-    for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
-        asm volatile (
-            "ptc.l %0,%1;;"
-            :
-            : "r" (va), "r" (ps<<2)
-        );
-    }
+    for(; va < (page + cnt * PAGE_SIZE); va += (1 << ps))
+        asm volatile ("ptc.l %0, %1;;" :: "r" (va), "r" (ps << 2));
     srlz_d();
     srlz_i();
 
     if (restore_rr) {
         rr_write(VA2VRN(va), rr.word);
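Note on the hunk above: the switch converts the number of pages to invalidate (cnt) into a purge page-size exponent ps, with b acting as roughly floor(log4(cnt)) so that b = 0 covers 1 - 3 pages, b = 1 covers 4 - 15, and so on; va is then aligned down to a (1 << ps) boundary and one ptc.l is issued per (1 << ps) bytes. The following is a minimal standalone sketch of that selection, not HelenOS code: purge_ps() is a hypothetical helper, PAGE_WIDTH is assumed to be the base page-size exponent, and the derivation of b (which happens before this hunk) is assumed to match the ranges given in the case comments.

#include <stdint.h>

#define PAGE_WIDTH 14                       /* assumed base page width */

static uint64_t purge_ps(uint64_t cnt)
{
    uint64_t b = 0;

    while (cnt >>= 2)                       /* b = floor(log4(cnt)): 1 - 3 -> 0, 4 - 15 -> 1, ... */
        b++;

    if (b <= 6)
        return PAGE_WIDTH + 2 * b;          /* cases 0 - 6 */
    if (b <= 8)
        return PAGE_WIDTH + 14;             /* cases 7 and 8 share one width */
    return PAGE_WIDTH + 18;                 /* default */
}

The caller would then clear the low ps bits of va and walk the range in (1 << ps) strides, which is exactly what the rewritten for loop in the new revision does.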
@@ -185,36 +180,40 @@
     }
 }
 
 /** Insert data into data translation cache.
  *
- * @param va Virtual page address.
- * @param asid Address space identifier.
- * @param entry The rest of TLB entry as required by TLB insertion format.
+ * @param va        Virtual page address.
+ * @param asid      Address space identifier.
+ * @param entry     The rest of TLB entry as required by TLB insertion
+ *          format.
  */
 void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
 {
     tc_mapping_insert(va, asid, entry, true);
 }
 
 /** Insert data into instruction translation cache.
  *
- * @param va Virtual page address.
- * @param asid Address space identifier.
- * @param entry The rest of TLB entry as required by TLB insertion format.
+ * @param va        Virtual page address.
+ * @param asid      Address space identifier.
+ * @param entry     The rest of TLB entry as required by TLB insertion
+ *          format.
  */
 void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry)
 {
     tc_mapping_insert(va, asid, entry, false);
 }
 
 /** Insert data into instruction or data translation cache.
  *
- * @param va Virtual page address.
- * @param asid Address space identifier.
- * @param entry The rest of TLB entry as required by TLB insertion format.
- * @param dtc If true, insert into data translation cache, use instruction translation cache otherwise.
+ * @param va        Virtual page address.
+ * @param asid      Address space identifier.
+ * @param entry     The rest of TLB entry as required by TLB insertion
+ *          format.
+ * @param dtc       If true, insert into data translation cache, use
+ *          instruction translation cache otherwise.
  */
 void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc)
 {
     region_register rr;
     bool restore_rr = false;
@@ -233,23 +232,24 @@
         srlz_d();
         srlz_i();
     }
 
     asm volatile (
-        "mov r8=psr;;\n"
+        "mov r8 = psr;;\n"
         "rsm %0;;\n"            /* PSR_IC_MASK */
         "srlz.d;;\n"
         "srlz.i;;\n"
-        "mov cr.ifa=%1\n"       /* va */
-        "mov cr.itir=%2;;\n"        /* entry.word[1] */
+        "mov cr.ifa = %1\n"     /* va */
+        "mov cr.itir = %2;;\n"      /* entry.word[1] */
         "cmp.eq p6,p7 = %4,r0;;\n"  /* decide between itc and dtc */
         "(p6) itc.i %3;;\n"
         "(p7) itc.d %3;;\n"
-        "mov psr.l=r8;;\n"
+        "mov psr.l = r8;;\n"
         "srlz.d;;\n"
         :
-        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (dtc)
+        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
+            "r" (entry.word[0]), "r" (dtc)
         : "p6", "p7", "r8"
     );
 
     if (restore_rr) {
         rr_write(VA2VRN(va), rr.word);
258
    }
258
    }
259
}
259
}
260
 
260
 
261
/** Insert data into instruction translation register.
261
/** Insert data into instruction translation register.
262
 *
262
 *
263
 * @param va Virtual page address.
263
 * @param va        Virtual page address.
264
 * @param asid Address space identifier.
264
 * @param asid      Address space identifier.
265
 * @param entry The rest of TLB entry as required by TLB insertion format.
265
 * @param entry     The rest of TLB entry as required by TLB insertion
-
 
266
 *          format.
266
 * @param tr Translation register.
267
 * @param tr        Translation register.
267
 */
268
 */
-
 
269
void
268
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
270
itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
269
{
271
{
270
    tr_mapping_insert(va, asid, entry, false, tr);
272
    tr_mapping_insert(va, asid, entry, false, tr);
271
}
273
}
272
 
274
 
273
/** Insert data into data translation register.
275
/** Insert data into data translation register.
274
 *
276
 *
275
 * @param va Virtual page address.
277
 * @param va        Virtual page address.
276
 * @param asid Address space identifier.
278
 * @param asid      Address space identifier.
277
 * @param entry The rest of TLB entry as required by TLB insertion format.
279
 * @param entry     The rest of TLB entry as required by TLB insertion
-
 
280
 *          format.
278
 * @param tr Translation register.
281
 * @param tr        Translation register.
279
 */
282
 */
-
 
283
void
280
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
284
dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr)
281
{
285
{
282
    tr_mapping_insert(va, asid, entry, true, tr);
286
    tr_mapping_insert(va, asid, entry, true, tr);
283
}
287
}
284
 
288
 
285
/** Insert data into instruction or data translation register.
289
/** Insert data into instruction or data translation register.
286
 *
290
 *
287
 * @param va Virtual page address.
291
 * @param va        Virtual page address.
288
 * @param asid Address space identifier.
292
 * @param asid      Address space identifier.
289
 * @param entry The rest of TLB entry as required by TLB insertion format.
293
 * @param entry     The rest of TLB entry as required by TLB insertion
-
 
294
 *          format.
290
 * @param dtr If true, insert into data translation register, use instruction translation register otherwise.
295
 * @param dtr       If true, insert into data translation register, use
-
 
296
 *          instruction translation register otherwise.
291
 * @param tr Translation register.
297
 * @param tr        Translation register.
292
 */
298
 */
-
 
299
void
293
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr)
300
tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr,
-
 
301
    index_t tr)
294
{
302
{
295
    region_register rr;
303
    region_register rr;
296
    bool restore_rr = false;
304
    bool restore_rr = false;
297
 
305
 
298
    rr.word = rr_read(VA2VRN(va));
306
    rr.word = rr_read(VA2VRN(va));
Line 309... Line 317...
309
        srlz_d();
317
        srlz_d();
310
        srlz_i();
318
        srlz_i();
311
    }
319
    }
312
 
320
 
313
    asm volatile (
321
    asm volatile (
314
        "mov r8=psr;;\n"
322
        "mov r8 = psr;;\n"
315
        "rsm %0;;\n"            /* PSR_IC_MASK */
323
        "rsm %0;;\n"            /* PSR_IC_MASK */
316
        "srlz.d;;\n"
324
        "srlz.d;;\n"
317
        "srlz.i;;\n"
325
        "srlz.i;;\n"
318
        "mov cr.ifa=%1\n"           /* va */         
326
        "mov cr.ifa = %1\n"         /* va */         
319
        "mov cr.itir=%2;;\n"        /* entry.word[1] */
327
        "mov cr.itir = %2;;\n"      /* entry.word[1] */
320
        "cmp.eq p6,p7=%5,r0;;\n"    /* decide between itr and dtr */
328
        "cmp.eq p6,p7 = %5,r0;;\n"  /* decide between itr and dtr */
321
        "(p6) itr.i itr[%4]=%3;;\n"
329
        "(p6) itr.i itr[%4] = %3;;\n"
322
        "(p7) itr.d dtr[%4]=%3;;\n"
330
        "(p7) itr.d dtr[%4] = %3;;\n"
323
        "mov psr.l=r8;;\n"
331
        "mov psr.l = r8;;\n"
324
        "srlz.d;;\n"
332
        "srlz.d;;\n"
325
        :
333
        :
326
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]), "r" (entry.word[0]), "r" (tr), "r" (dtr)
334
        : "i" (PSR_IC_MASK), "r" (va), "r" (entry.word[1]),
-
 
335
            "r" (entry.word[0]), "r" (tr), "r" (dtr)
327
        : "p6", "p7", "r8"
336
        : "p6", "p7", "r8"
328
    );
337
    );
329
   
338
   
330
    if (restore_rr) {
339
    if (restore_rr) {
331
        rr_write(VA2VRN(va), rr.word);
340
        rr_write(VA2VRN(va), rr.word);
Line 334... Line 343...
334
    }
343
    }
335
}
344
}
336
 
345
 
337
/** Insert data into DTLB.
346
/** Insert data into DTLB.
338
 *
347
 *
339
 * @param page Virtual page address including VRN bits.
348
 * @param page      Virtual page address including VRN bits.
340
 * @param frame Physical frame address.
349
 * @param frame     Physical frame address.
341
 * @param dtr If true, insert into data translation register, use data translation cache otherwise.
350
 * @param dtr       If true, insert into data translation register, use data
-
 
351
 *          translation cache otherwise.
342
 * @param tr Translation register if dtr is true, ignored otherwise.
352
 * @param tr        Translation register if dtr is true, ignored otherwise.
343
 */
353
 */
-
 
354
void
344
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr)
355
dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr,
-
 
356
    index_t tr)
345
{
357
{
346
    tlb_entry_t entry;
358
    tlb_entry_t entry;
347
   
359
   
348
    entry.word[0] = 0;
360
    entry.word[0] = 0;
349
    entry.word[1] = 0;
361
    entry.word[1] = 0;
Line 365... Line 377...
365
 
377
 
366
/** Purge kernel entries from DTR.
378
/** Purge kernel entries from DTR.
367
 *
379
 *
368
 * Purge DTR entries used by the kernel.
380
 * Purge DTR entries used by the kernel.
369
 *
381
 *
370
 * @param page Virtual page address including VRN bits.
382
 * @param page      Virtual page address including VRN bits.
371
 * @param width Width of the purge in bits.
383
 * @param width     Width of the purge in bits.
372
 */
384
 */
373
void dtr_purge(uintptr_t page, count_t width)
385
void dtr_purge(uintptr_t page, count_t width)
374
{
386
{
375
    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2));
387
    asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
376
}
388
}
377
 
389
 
378
 
390
 
379
/** Copy content of PTE into data translation cache.
391
/** Copy content of PTE into data translation cache.
380
 *
392
 *
381
 * @param t PTE.
393
 * @param t     PTE.
382
 */
394
 */
383
void dtc_pte_copy(pte_t *t)
395
void dtc_pte_copy(pte_t *t)
384
{
396
{
385
    tlb_entry_t entry;
397
    tlb_entry_t entry;
386
 
398
 
Line 402... Line 414...
402
#endif  
414
#endif  
403
}
415
}
404
 
416
 
405
/** Copy content of PTE into instruction translation cache.
417
/** Copy content of PTE into instruction translation cache.
406
 *
418
 *
407
 * @param t PTE.
419
 * @param t     PTE.
408
 */
420
 */
409
void itc_pte_copy(pte_t *t)
421
void itc_pte_copy(pte_t *t)
410
{
422
{
411
    tlb_entry_t entry;
423
    tlb_entry_t entry;
412
 
424
 
Line 429... Line 441...
429
#endif  
441
#endif  
430
}
442
}
431
 
443
 
432
/** Instruction TLB fault handler for faults with VHPT turned off.
444
/** Instruction TLB fault handler for faults with VHPT turned off.
433
 *
445
 *
434
 * @param vector Interruption vector.
446
 * @param vector        Interruption vector.
435
 * @param istate Structure with saved interruption state.
447
 * @param istate        Structure with saved interruption state.
436
 */
448
 */
437
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
449
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate)
438
{
450
{
439
    region_register rr;
451
    region_register rr;
440
    rid_t rid;
452
    rid_t rid;
Line 459... Line 471...
459
         * Forward the page fault to address space page fault handler.
471
         * Forward the page fault to address space page fault handler.
460
         */
472
         */
461
        page_table_unlock(AS, true);
473
        page_table_unlock(AS, true);
462
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
474
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
463
            fault_if_from_uspace(istate,"Page fault at %p",va);
475
            fault_if_from_uspace(istate,"Page fault at %p",va);
464
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
476
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
-
 
477
                istate->cr_iip);
465
        }
478
        }
466
    }
479
    }
467
}
480
}
468
 
481
 
469
 
-
 
470
 
-
 
471
static int is_io_page_accessible(int page)
482
static int is_io_page_accessible(int page)
472
{
483
{
-
 
484
    if (TASK->arch.iomap)
473
    if(TASK->arch.iomap) return bitmap_get(TASK->arch.iomap,page);
485
        return bitmap_get(TASK->arch.iomap,page);
-
 
486
    else
474
    else return 0;
487
        return 0;
475
}
488
}
476
 
489
 
477
#define IO_FRAME_BASE 0xFFFFC000000
490
#define IO_FRAME_BASE 0xFFFFC000000
478
 
491
 
-
 
492
/**
479
/** There is special handling of memmaped lagacy io, because
493
 * There is special handling of memory mapped legacy io, because of 4KB sized
480
 * of 4KB sized access
-
 
481
 * only for userspace
494
 * access for userspace.
482
 *
-
 
483
 * @param va virtual address of page fault
-
 
484
 * @param istate Structure with saved interruption state.
-
 
485
 *
495
 *
-
 
496
 * @param va        Virtual address of page fault.
-
 
497
 * @param istate    Structure with saved interruption state.
486
 *
498
 *
487
 * @return 1 on success, 0 on fail
499
 * @return      One on success, zero on failure.
488
 */
500
 */
489
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
501
static int try_memmap_io_insertion(uintptr_t va, istate_t *istate)
490
{
502
{
491
    if((va >= IO_OFFSET ) && (va < IO_OFFSET + (1<<IO_PAGE_WIDTH)))
503
    if ((va >= IO_OFFSET ) && (va < IO_OFFSET + (1 << IO_PAGE_WIDTH))) {
492
        if(TASK){
504
        if (TASK) {
493
           
-
 
494
            uint64_t io_page=(va &  ((1<<IO_PAGE_WIDTH)-1)) >> (USPACE_IO_PAGE_WIDTH);
505
            uint64_t io_page = (va & ((1 << IO_PAGE_WIDTH) - 1)) >>
-
 
506
                USPACE_IO_PAGE_WIDTH;
-
 
507
 
495
            if(is_io_page_accessible(io_page)){
508
            if (is_io_page_accessible(io_page)) {
496
                uint64_t page,frame;
509
                uint64_t page, frame;
497
 
510
 
-
 
511
                page = IO_OFFSET +
498
                page = IO_OFFSET + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
512
                    (1 << USPACE_IO_PAGE_WIDTH) * io_page;
-
 
513
                frame = IO_FRAME_BASE +
499
                frame = IO_FRAME_BASE + (1 << USPACE_IO_PAGE_WIDTH) * io_page;
514
                    (1 << USPACE_IO_PAGE_WIDTH) * io_page;
500
 
-
 
501
 
515
 
502
                tlb_entry_t entry;
516
                tlb_entry_t entry;
503
   
517
   
504
                entry.word[0] = 0;
518
                entry.word[0] = 0;
505
                entry.word[1] = 0;
519
                entry.word[1] = 0;
506
   
520
   
507
                entry.p = true;         /* present */
521
                entry.p = true;     /* present */
508
                entry.ma = MA_UNCACHEABLE;     
522
                entry.ma = MA_UNCACHEABLE;     
509
                entry.a = true;         /* already accessed */
523
                entry.a = true;     /* already accessed */
510
                entry.d = true;         /* already dirty */
524
                entry.d = true;     /* already dirty */
511
                entry.pl = PL_USER;
525
                entry.pl = PL_USER;
512
                entry.ar = AR_READ | AR_WRITE;
526
                entry.ar = AR_READ | AR_WRITE;
513
                entry.ppn = frame >> PPN_SHIFT;
527
                entry.ppn = frame >> PPN_SHIFT;
514
                entry.ps = USPACE_IO_PAGE_WIDTH;
528
                entry.ps = USPACE_IO_PAGE_WIDTH;
515
   
529
   
516
                dtc_mapping_insert(page, TASK->as->asid, entry);
530
                dtc_mapping_insert(page, TASK->as->asid, entry);
517
                return 1;
531
                return 1;
518
            }else {
532
            } else {
519
                fault_if_from_uspace(istate,"IO access fault at %p",va);
533
                fault_if_from_uspace(istate,
520
                return 0;
534
                    "IO access fault at %p", va);
521
            }      
535
            }
522
        } else
536
        }
523
            return 0;
-
 
524
    else
537
    }
525
        return 0;
-
 
526
       
538
       
527
    return 0;
539
    return 0;
528
 
-
 
529
}
540
}
530
 
541
 
531
 
-
 
532
 
-
 
533
 
-
 
534
/** Data TLB fault handler for faults with VHPT turned off.
542
/** Data TLB fault handler for faults with VHPT turned off.
535
 *
543
 *
536
 * @param vector Interruption vector.
544
 * @param vector    Interruption vector.
537
 * @param istate Structure with saved interruption state.
545
 * @param istate    Structure with saved interruption state.
538
 */
546
 */
539
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
547
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate)
540
{
548
{
541
    region_register rr;
549
    region_register rr;
542
    rid_t rid;
550
    rid_t rid;
Line 566... Line 574...
566
         */
574
         */
567
        dtc_pte_copy(t);
575
        dtc_pte_copy(t);
568
        page_table_unlock(AS, true);
576
        page_table_unlock(AS, true);
569
    } else {
577
    } else {
570
        page_table_unlock(AS, true);
578
        page_table_unlock(AS, true);
571
        if (try_memmap_io_insertion(va,istate)) return;
579
        if (try_memmap_io_insertion(va, istate))
-
 
580
            return;
572
        /*
581
        /*
573
         * Forward the page fault to the address space page fault handler.
582
         * Forward the page fault to the address space page fault
-
 
583
         * handler.
574
         */
584
         */
575
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
585
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
576
            fault_if_from_uspace(istate,"Page fault at %p",va);
586
            fault_if_from_uspace(istate,"Page fault at %p",va);
577
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
587
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
-
 
588
                istate->cr_iip);
578
        }
589
        }
579
    }
590
    }
580
}
591
}
581
 
592
 
582
/** Data nested TLB fault handler.
593
/** Data nested TLB fault handler.
583
 *
594
 *
584
 * This fault should not occur.
595
 * This fault should not occur.
585
 *
596
 *
586
 * @param vector Interruption vector.
597
 * @param vector    Interruption vector.
587
 * @param istate Structure with saved interruption state.
598
 * @param istate    Structure with saved interruption state.
588
 */
599
 */
589
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
600
void data_nested_tlb_fault(uint64_t vector, istate_t *istate)
590
{
601
{
591
    panic("%s\n", __func__);
602
    panic("%s\n", __func__);
592
}
603
}
593
 
604
 
594
/** Data Dirty bit fault handler.
605
/** Data Dirty bit fault handler.
595
 *
606
 *
596
 * @param vector Interruption vector.
607
 * @param vector    Interruption vector.
597
 * @param istate Structure with saved interruption state.
608
 * @param istate    Structure with saved interruption state.
598
 */
609
 */
599
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
610
void data_dirty_bit_fault(uint64_t vector, istate_t *istate)
600
{
611
{
601
    region_register rr;
612
    region_register rr;
602
    rid_t rid;
613
    rid_t rid;
Line 618... Line 629...
618
        t->d = true;
629
        t->d = true;
619
        dtc_pte_copy(t);
630
        dtc_pte_copy(t);
620
    } else {
631
    } else {
621
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
632
        if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
622
            fault_if_from_uspace(istate,"Page fault at %p",va);
633
            fault_if_from_uspace(istate,"Page fault at %p",va);
623
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
634
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
624
            t->d = true;
-
 
625
            dtc_pte_copy(t);
635
                istate->cr_iip);
626
        }
636
        }
627
    }
637
    }
628
    page_table_unlock(AS, true);
638
    page_table_unlock(AS, true);
629
}
639
}
630
 
640
 
631
/** Instruction access bit fault handler.
641
/** Instruction access bit fault handler.
632
 *
642
 *
633
 * @param vector Interruption vector.
643
 * @param vector    Interruption vector.
634
 * @param istate Structure with saved interruption state.
644
 * @param istate    Structure with saved interruption state.
635
 */
645
 */
636
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
646
void instruction_access_bit_fault(uint64_t vector, istate_t *istate)
637
{
647
{
638
    region_register rr;
648
    region_register rr;
639
    rid_t rid;
649
    rid_t rid;
Line 654... Line 664...
654
         */
664
         */
655
        t->a = true;
665
        t->a = true;
656
        itc_pte_copy(t);
666
        itc_pte_copy(t);
657
    } else {
667
    } else {
658
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
668
        if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
659
            fault_if_from_uspace(istate,"Page fault at %p",va);
669
            fault_if_from_uspace(istate, "Page fault at %p", va);
660
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
670
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
661
            t->a = true;
-
 
662
            itc_pte_copy(t);
671
                istate->cr_iip);
663
        }
672
        }
664
    }
673
    }
665
    page_table_unlock(AS, true);
674
    page_table_unlock(AS, true);
666
}
675
}
667
 
676
 
Line 691... Line 700...
691
         */
700
         */
692
        t->a = true;
701
        t->a = true;
693
        dtc_pte_copy(t);
702
        dtc_pte_copy(t);
694
    } else {
703
    } else {
695
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
704
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
696
            fault_if_from_uspace(istate,"Page fault at %p",va);
705
            fault_if_from_uspace(istate, "Page fault at %p", va);
697
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid, istate->cr_iip);
706
            panic("%s: va=%p, rid=%d, iip=%p\n", __func__, va, rid,
698
            t->a = true;
-
 
699
            itc_pte_copy(t);
707
                istate->cr_iip);
700
        }
708
        }
701
    }
709
    }
702
    page_table_unlock(AS, true);
710
    page_table_unlock(AS, true);
703
}
711
}
704
 
712
 
Line 733... Line 741...
733
            dtc_pte_copy(t);
741
            dtc_pte_copy(t);
734
        page_table_unlock(AS, true);
742
        page_table_unlock(AS, true);
735
    } else {
743
    } else {
736
        page_table_unlock(AS, true);
744
        page_table_unlock(AS, true);
737
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
745
        if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
738
            fault_if_from_uspace(istate,"Page fault at %p",va);
746
            fault_if_from_uspace(istate, "Page fault at %p", va);
739
            panic("%s: va=%p, rid=%d\n", __func__, va, rid);
747
            panic("%s: va=%p, rid=%d\n", __func__, va, rid);
740
        }
748
        }
741
    }
749
    }
742
}
750
}
743
 
751