HelenOS Subversion repository

Compare revisions: Rev 1851 → Rev 1852

/trunk/kernel/test/synch/rwlock4/test.c
128,9 → 128,7
waitq_initialize(&can_start);
rwlock_initialize(&rwlock);
 
for (; ;) {
for (;;) {
thread_t *thrd;
context_save(&ctx);
/trunk/kernel/test/thread/thread1/test.c
44,13 → 44,8
 
thread_detach(THREAD);
 
while(1)
{
while (1)
;
while (1)
printf("%d\n",(int)(THREAD->tid));
scheduler();
}
}
 
void test(void)
/trunk/kernel/arch/sparc64/_link.ld.in
26,6 → 26,7
*(.sdata);
*(.sdata2);
*(.sbss);
. = ALIGN(8);
hardcoded_ktext_size = .;
QUAD(ktext_end - ktext_start);
hardcoded_kdata_size = .;
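Note on this hunk: the new ALIGN(8) directive guarantees that the QUAD values emitted for hardcoded_ktext_size and hardcoded_kdata_size start on an 8-byte boundary. SPARC traps on misaligned 8-byte loads, so reading these symbols with ldx could otherwise fault if the preceding .sbss contents left the location counter unaligned. A minimal sketch of the arithmetic ALIGN(8) performs (the starting value of '.' is hypothetical):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t dot = 0x4000a5edULL;          /* hypothetical '.' after .sbss */
	uint64_t aligned = (dot + 7) & ~7ULL;  /* what ALIGN(8) computes */

	assert(aligned % 8 == 0);
	assert(aligned - dot < 8);
	return 0;
}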
/trunk/kernel/arch/sparc64/include/interrupt.h
44,13 → 44,6
#define IVT_ITEMS 15
#define IVT_FIRST 1
 
/* Dummy macros. */
#define IRQ_KBD 2
#define VECTOR_KBD IRQ_KBD
 
#define trap_virtual_enable_irqs(x)
#define trap_virtual_eoi()
 
struct istate {
uint64_t pstate;
uint64_t tnpc;
/trunk/kernel/arch/sparc64/include/regdef.h
35,13 → 35,20
#ifndef KERN_sparc64_REGDEF_H_
#define KERN_sparc64_REGDEF_H_
 
#define PSTATE_IE_BIT 2
#define PSTATE_AM_BIT 8
#define PSTATE_IE_BIT (1<<1)
#define PSTATE_AM_BIT (1<<3)
 
#define PSTATE_AG_BIT (1<<0)
#define PSTATE_IG_BIT (1<<11)
#define PSTATE_MG_BIT (1<<10)
 
#define PSTATE_PRIV_BIT (1<<2)
 
#define TSTATE_PSTATE_SHIFT 8
#define TSTATE_PRIV_BIT (PSTATE_PRIV_BIT<<TSTATE_PSTATE_SHIFT)
 
#define TSTATE_CWP_MASK 0x1f
 
#endif
 
/** @}
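Note on this hunk: PSTATE_IE_BIT and PSTATE_AM_BIT change from bit positions (2 and 8) to ready-made masks ((1<<1) and (1<<3)), and the global-set and privilege masks are added. Because TSTATE mirrors PSTATE starting at bit 8, TSTATE_PRIV_BIT falls out of a single shift. A small check of that arithmetic (constants copied from the hunk above):

#include <assert.h>

#define PSTATE_PRIV_BIT     (1 << 2)
#define TSTATE_PSTATE_SHIFT 8
#define TSTATE_PRIV_BIT     (PSTATE_PRIV_BIT << TSTATE_PSTATE_SHIFT)

int main(void)
{
	/* PSTATE.PRIV (bit 2) appears as bit 10 of TSTATE. */
	assert(TSTATE_PRIV_BIT == 0x400);
	return 0;
}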
/trunk/kernel/arch/sparc64/include/arch.h
35,6 → 35,9
#ifndef __sparc64_ARCH_H__
#define __sparc64_ARCH_H__
 
#define ASI_AIUP 0x10 /**< Access to primary context with user privileges. */
#define ASI_AIUS 0x11 /**< Access to secondary context with user privileges. */
 
#endif
 
/** @}
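Note on this hunk: ASI_AIUP and ASI_AIUS ("as if user, primary/secondary") select address spaces for the alternate-space load/store instructions. Writing one of them into the %asi register makes subsequent ldxa/stxa accesses go through userspace address translation and privilege checks, which is what the new userspace spill/fill handlers below rely on. A hedged illustration using GCC inline assembly (not from the source):

#include <stdint.h>

#define ASI_AIUP 0x10  /* primary context, user privileges */

/* Illustrative only: read a 64-bit word through ASI_AIUP, i.e. translated
 * and privilege-checked exactly as a userspace access would be. */
static inline uint64_t read_as_user(const uint64_t *uaddr)
{
	uint64_t v;

	asm volatile ("ldxa [%1] %2, %0"
	    : "=r" (v)
	    : "r" (uaddr), "i" (ASI_AIUP));
	return v;
}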
/trunk/kernel/arch/sparc64/include/trap/regwin.h
38,6 → 38,7
#define __sparc64_REGWIN_H__
 
#include <arch/stack.h>
#include <arch/arch.h>
 
#define TT_CLEAN_WINDOW 0x24
#define TT_SPILL_0_NORMAL 0x80
49,7 → 50,9
#define SPILL_HANDLER_SIZE REGWIN_HANDLER_SIZE
#define FILL_HANDLER_SIZE REGWIN_HANDLER_SIZE
 
/** Window Save Area offsets. */
#define NWINDOW 8
 
/* Window Save Area offsets. */
#define L0_OFFSET 0
#define L1_OFFSET 8
#define L2_OFFSET 16
68,7 → 71,11
#define I7_OFFSET 120
 
#ifdef __ASM__
.macro SPILL_NORMAL_HANDLER
 
/*
* Macro used by the nucleus and by primary context 0 during normal and other spills.
*/
.macro SPILL_NORMAL_HANDLER_KERNEL
stx %l0, [%sp + STACK_BIAS + L0_OFFSET]
stx %l1, [%sp + STACK_BIAS + L1_OFFSET]
stx %l2, [%sp + STACK_BIAS + L2_OFFSET]
89,7 → 96,61
retry
.endm
 
.macro FILL_NORMAL_HANDLER
/*
* Macro used by userspace during normal spills.
*/
.macro SPILL_NORMAL_HANDLER_USERSPACE
wr ASI_AIUP, %asi
stxa %l0, [%sp + STACK_BIAS + L0_OFFSET] %asi
stxa %l1, [%sp + STACK_BIAS + L1_OFFSET] %asi
stxa %l2, [%sp + STACK_BIAS + L2_OFFSET] %asi
stxa %l3, [%sp + STACK_BIAS + L3_OFFSET] %asi
stxa %l4, [%sp + STACK_BIAS + L4_OFFSET] %asi
stxa %l5, [%sp + STACK_BIAS + L5_OFFSET] %asi
stxa %l6, [%sp + STACK_BIAS + L6_OFFSET] %asi
stxa %l7, [%sp + STACK_BIAS + L7_OFFSET] %asi
stxa %i0, [%sp + STACK_BIAS + I0_OFFSET] %asi
stxa %i1, [%sp + STACK_BIAS + I1_OFFSET] %asi
stxa %i2, [%sp + STACK_BIAS + I2_OFFSET] %asi
stxa %i3, [%sp + STACK_BIAS + I3_OFFSET] %asi
stxa %i4, [%sp + STACK_BIAS + I4_OFFSET] %asi
stxa %i5, [%sp + STACK_BIAS + I5_OFFSET] %asi
stxa %i6, [%sp + STACK_BIAS + I6_OFFSET] %asi
stxa %i7, [%sp + STACK_BIAS + I7_OFFSET] %asi
saved
retry
.endm
 
/*
* Macro used by userspace during other spills.
*/
.macro SPILL_OTHER_HANDLER_USERSPACE
wr ASI_AIUS, %asi
stxa %l0, [%sp + STACK_BIAS + L0_OFFSET] %asi
stxa %l1, [%sp + STACK_BIAS + L1_OFFSET] %asi
stxa %l2, [%sp + STACK_BIAS + L2_OFFSET] %asi
stxa %l3, [%sp + STACK_BIAS + L3_OFFSET] %asi
stxa %l4, [%sp + STACK_BIAS + L4_OFFSET] %asi
stxa %l5, [%sp + STACK_BIAS + L5_OFFSET] %asi
stxa %l6, [%sp + STACK_BIAS + L6_OFFSET] %asi
stxa %l7, [%sp + STACK_BIAS + L7_OFFSET] %asi
stxa %i0, [%sp + STACK_BIAS + I0_OFFSET] %asi
stxa %i1, [%sp + STACK_BIAS + I1_OFFSET] %asi
stxa %i2, [%sp + STACK_BIAS + I2_OFFSET] %asi
stxa %i3, [%sp + STACK_BIAS + I3_OFFSET] %asi
stxa %i4, [%sp + STACK_BIAS + I4_OFFSET] %asi
stxa %i5, [%sp + STACK_BIAS + I5_OFFSET] %asi
stxa %i6, [%sp + STACK_BIAS + I6_OFFSET] %asi
stxa %i7, [%sp + STACK_BIAS + I7_OFFSET] %asi
saved
retry
.endm
 
 
/*
* Macro used by the nucleus and by primary context 0 during normal fills.
*/
.macro FILL_NORMAL_HANDLER_KERNEL
ldx [%sp + STACK_BIAS + L0_OFFSET], %l0
ldx [%sp + STACK_BIAS + L1_OFFSET], %l1
ldx [%sp + STACK_BIAS + L2_OFFSET], %l2
110,6 → 171,56
retry
.endm
 
/*
* Macro used by userspace during normal fills.
*/
.macro FILL_NORMAL_HANDLER_USERSPACE
wr ASI_AIUP, %asi
ldxa [%sp + STACK_BIAS + L0_OFFSET] %asi, %l0
ldxa [%sp + STACK_BIAS + L1_OFFSET] %asi, %l1
ldxa [%sp + STACK_BIAS + L2_OFFSET] %asi, %l2
ldxa [%sp + STACK_BIAS + L3_OFFSET] %asi, %l3
ldxa [%sp + STACK_BIAS + L4_OFFSET] %asi, %l4
ldxa [%sp + STACK_BIAS + L5_OFFSET] %asi, %l5
ldxa [%sp + STACK_BIAS + L6_OFFSET] %asi, %l6
ldxa [%sp + STACK_BIAS + L7_OFFSET] %asi, %l7
ldxa [%sp + STACK_BIAS + I0_OFFSET] %asi, %i0
ldxa [%sp + STACK_BIAS + I1_OFFSET] %asi, %i1
ldxa [%sp + STACK_BIAS + I2_OFFSET] %asi, %i2
ldxa [%sp + STACK_BIAS + I3_OFFSET] %asi, %i3
ldxa [%sp + STACK_BIAS + I4_OFFSET] %asi, %i4
ldxa [%sp + STACK_BIAS + I5_OFFSET] %asi, %i5
ldxa [%sp + STACK_BIAS + I6_OFFSET] %asi, %i6
ldxa [%sp + STACK_BIAS + I7_OFFSET] %asi, %i7
restored
retry
.endm
 
/*
* Macro used by userspace during other fills.
*/
.macro FILL_OTHER_HANDLER_USERSPACE
wr ASI_AIUS, %asi
ldxa [%sp + STACK_BIAS + L0_OFFSET] %asi, %l0
ldxa [%sp + STACK_BIAS + L1_OFFSET] %asi, %l1
ldxa [%sp + STACK_BIAS + L2_OFFSET] %asi, %l2
ldxa [%sp + STACK_BIAS + L3_OFFSET] %asi, %l3
ldxa [%sp + STACK_BIAS + L4_OFFSET] %asi, %l4
ldxa [%sp + STACK_BIAS + L5_OFFSET] %asi, %l5
ldxa [%sp + STACK_BIAS + L6_OFFSET] %asi, %l6
ldxa [%sp + STACK_BIAS + L7_OFFSET] %asi, %l7
ldxa [%sp + STACK_BIAS + I0_OFFSET] %asi, %i0
ldxa [%sp + STACK_BIAS + I1_OFFSET] %asi, %i1
ldxa [%sp + STACK_BIAS + I2_OFFSET] %asi, %i2
ldxa [%sp + STACK_BIAS + I3_OFFSET] %asi, %i3
ldxa [%sp + STACK_BIAS + I4_OFFSET] %asi, %i4
ldxa [%sp + STACK_BIAS + I5_OFFSET] %asi, %i5
ldxa [%sp + STACK_BIAS + I6_OFFSET] %asi, %i6
ldxa [%sp + STACK_BIAS + I7_OFFSET] %asi, %i7
restored
retry
.endm
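Note on these macros: every spill variant stores, and every fill variant reloads, the same sixteen registers; the variants differ only in the address space the accesses go through. The kernel variants use plain stx/ldx in the current (nucleus) context, while the userspace variants first write ASI_AIUP or ASI_AIUS into %asi so the stxa/ldxa accesses behave like userspace accesses. The 128-byte window save area below the biased stack pointer can be pictured as this C struct (illustrative; the struct name is not from the source):

#include <stdint.h>

/* Mirrors the L*_OFFSET and I*_OFFSET constants above:
 * %l0..%l7 at offsets 0..56, %i0..%i7 at offsets 64..120. */
typedef struct {
	uint64_t l[8];
	uint64_t i[8];
} reg_window_t;

/* A spill handler conceptually performs
 *   reg_window_t *w = (reg_window_t *) (sp + STACK_BIAS);
 *   w->l[k] = %lk;  w->i[k] = %ik;   for k = 0..7
 * followed by SAVED and RETRY; a fill handler does the reverse. */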
 
.macro CLEAN_WINDOW_HANDLER
rdpr %cleanwin, %l0
add %l0, 1, %l0
/trunk/kernel/arch/sparc64/include/trap/interrupt.h
79,8 → 79,7
 
#ifdef __ASM__
.macro INTERRUPT_LEVEL_N_HANDLER n
save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
mov \n - 1, %o0
mov \n - 1, %g2
PREEMPTIBLE_HANDLER exc_dispatch
.endm
 
/trunk/kernel/arch/sparc64/include/trap/trap_table.h
85,12 → 85,11
#define SAVED_TSTATE -(1*8)
#define SAVED_TPC -(2*8)
#define SAVED_TNPC -(3*8)
#define SAVED_PSTATE -(4*8)
 
.macro PREEMPTIBLE_HANDLER f
set \f, %l0
sethi %hi(\f), %g1
b preemptible_handler
nop
or %g1, %lo(\f), %g1
.endm
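Note on this hunk: PREEMPTIBLE_HANDLER now materializes the handler address in %g1 with an explicit sethi/or pair, the classic two-instruction expansion of the 'set' synthetic: sethi deposits the upper 22 bits, or fills in the lower 10. A global register is used because the SAVE now happens inside preemptible_handler itself, and a local such as the old %l0 would not survive the window switch. A quick check of the bit split (a sketch; 32-bit immediates as on SPARC):

#include <assert.h>
#include <stdint.h>

#define HI22(a) ((uint32_t)(a) >> 10)    /* what %hi() extracts */
#define LO10(a) ((uint32_t)(a) & 0x3ff)  /* what %lo() extracts */

int main(void)
{
	uint32_t addr = 0x12345678;

	assert(((HI22(addr) << 10) | LO10(addr)) == addr);
	return 0;
}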
 
.macro SIMPLE_HANDLER f
/trunk/kernel/arch/sparc64/include/trap/mmu.h
38,6 → 38,7
#define __sparc64_MMU_TRAP_H__
 
#include <arch/stack.h>
#include <arch/regdef.h>
#include <arch/mm/tlb.h>
#include <arch/mm/mmu.h>
#include <arch/mm/tte.h>
59,7 → 60,12
 
.macro FAST_DATA_ACCESS_MMU_MISS_HANDLER
/*
* First, test if it is the portion of the kernel address space
* First, try to refill TLB from TSB.
*/
! TODO
 
/*
Second, test whether it is a portion of the kernel address space
that is faulting. If that is the case, immediately create an
* identity mapping for that page in DTLB. VPN 0 is excluded from
* this treatment.
66,6 → 72,7
*
* Note that branch-delay slots are used in order to save space.
*/
0:
mov VA_DMMU_TAG_ACCESS, %g1
ldxa [%g1] ASI_DMMU, %g1 ! read the faulting Context and VPN
set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
74,11 → 81,6
andncc %g1, %g2, %g3 ! get page address into %g3
bz 0f ! page address is zero
 
/*
* Create and insert the identity-mapped entry for
* the faulting kernel page.
*/
or %g3, (TTE_CP|TTE_P|TTE_W), %g2 ! 8K pages are the default (encoded as 0)
set 1, %g3
sllx %g3, TTE_V_SHIFT, %g3
86,8 → 88,15
stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG ! identity map the kernel page
retry
 
/*
* Third, catch and handle special cases when the trap is caused by
* some register window trap handler.
*/
0:
save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
! TODO
 
0:
wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
.endm
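Note on the kernel-space path above: the handler builds a TLB entry in registers and pushes it straight into the DTLB data-in register, yielding an identity (virtual == physical) mapping. Roughly, in C; the bit positions below follow the standard UltraSPARC TTE data format and are an assumption here, not copied from the source:

#include <stdint.h>

/* Assumed UltraSPARC TTE data-word bits (cf. <arch/mm/tte.h> in the tree). */
#define TTE_W       (UINT64_C(1) << 1)  /* writable */
#define TTE_P       (UINT64_C(1) << 2)  /* privileged */
#define TTE_CP      (UINT64_C(1) << 5)  /* cacheable in physical cache */
#define TTE_V_SHIFT 63                  /* valid bit */

/* Sketch of what the hunk does with %g2/%g3 before the stxa to
 * ASI_DTLB_DATA_IN_REG; the 8K page size is encoded as 0, so no
 * size field needs to be set. */
static uint64_t make_identity_tte(uint64_t page_addr)
{
	return page_addr | TTE_CP | TTE_P | TTE_W
	    | (UINT64_C(1) << TTE_V_SHIFT);
}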
 
/trunk/kernel/arch/sparc64/src/trap/trap_table.S
32,8 → 32,6
 
.register %g2, #scratch
.register %g3, #scratch
.register %g6, #scratch
.register %g7, #scratch
 
.text
 
203,13 → 201,13
.org trap_table + TT_SPILL_0_NORMAL*ENTRY_SIZE
.global spill_0_normal
spill_0_normal:
SPILL_NORMAL_HANDLER
SPILL_NORMAL_HANDLER_KERNEL
 
/* TT = 0xc0, TL = 0, fill_0_normal handler */
.org trap_table + TT_FILL_0_NORMAL*ENTRY_SIZE
.global fill_0_normal
fill_0_normal:
FILL_NORMAL_HANDLER
FILL_NORMAL_HANDLER_KERNEL
 
/*
* Handlers for TL>0.
267,48 → 265,87
.org trap_table + (TT_SPILL_0_NORMAL+512)*ENTRY_SIZE
.global spill_0_normal_high
spill_0_normal_high:
SPILL_NORMAL_HANDLER
SPILL_NORMAL_HANDLER_KERNEL
 
/* TT = 0xc0, TL > 0, fill_0_normal handler */
.org trap_table + (TT_FILL_0_NORMAL+512)*ENTRY_SIZE
.global fill_0_normal_high
fill_0_normal_high:
FILL_NORMAL_HANDLER
FILL_NORMAL_HANDLER_KERNEL
 
 
/* Preemptible trap handler for TL=1.
*
* This trap handler makes arrangements to make calling of scheduler() from
* within a trap context possible. It is guaranteed to function only when traps
* are not nested (i.e. for TL=1).
* within a trap context possible. It is called from several other trap
* handlers.
*
* Every trap handler on TL=1 that makes a call to the scheduler needs to
* be based on this function. The reason behind it is that the nested
* trap levels and the automatic saving of the interrupted context by hardware
* do not work well together with scheduling (i.e. a thread cannot be rescheduled
* with TL>0). Therefore it is necessary to eliminate the effect of trap levels
* by software and save the necessary state on the kernel stack.
* This function can be entered either with interrupt globals or alternate globals.
* Memory management trap handlers are obliged to switch to one of those global sets
* prior to calling this function. Register window management functions are not
* allowed to modify the alternate global registers.
*
* Note that for traps with TL>1, more state needs to be saved. This function
* is therefore not going to work when TL>1.
*
* The caller is responsible for doing SAVE and allocating
* PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE bytes on the stack.
*
* Input registers:
* %l0 Address of function to call.
* Output registers:
* %l1 - %l7 Copy of %g1 - %g7
* %g1 Address of function to call.
* %g2 Argument for the function.
* %g6 Pre-set as kernel stack base if trap from userspace.
* %g7 Reserved.
*/
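In C terms, the new register convention boils down to the dispatch below: the handler address arrives in %g1, its first argument in %g2, and the istate pointer is computed from the stack pointer once the frame is set up. This is a conceptual restatement, not code from the revision:

#include <stdint.h>

typedef struct istate istate_t;

/* Conceptual view of preemptible_handler's call:
 * f      - arrives in %g1 (e.g. exc_dispatch or fast_data_access_mmu_miss)
 * arg    - arrives in %g2 (e.g. the interrupt level minus one)
 * istate - derived from %sp, pointing at the saved trap state */
static void dispatch(void (*f)(int, istate_t *), int arg, istate_t *istate)
{
	f(arg, istate);
}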
.global preemptible_handler
preemptible_handler:
rdpr %tstate, %g3
andcc %g3, TSTATE_PRIV_BIT, %g0 ! if this trap came from the privileged mode...
bnz 0f ! ...skip setting of kernel stack and primary context
nop
 
/*
* Save TSTATE, TPC, TNPC and PSTATE aside.
* Switch to kernel stack. The old stack is
* automatically saved in the old window's %sp
* and the new window's %fp.
*/
save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
 
/*
* Mark the CANSAVE windows as OTHER windows.
* Set CLEANWIN to NWINDOW-1 so that clean_window traps do not occur.
*/
rdpr %cansave, %l0
wrpr %l0, %otherwin
wrpr %g0, %cansave
wrpr %g0, NWINDOW-1, %cleanwin
 
/*
* Switch to primary context 0.
*/
mov VA_PRIMARY_CONTEXT_REG, %l0
stxa %g0, [%l0] ASI_DMMU
set kernel_image_start, %l0
flush %l0
 
ba 1f
nop
 
0:
save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
 
/*
* At this moment, we are using the kernel stack
* and have successfully allocated a register window.
*/
1:
 
/*
* Copy arguments.
*/
mov %g1, %l0
mov %g2, %o0
 
/*
* Save TSTATE, TPC and TNPC aside.
*/
rdpr %tstate, %g1
rdpr %tpc, %g2
rdpr %tnpc, %g3
rdpr %pstate, %g4
 
/*
* The following memory accesses will not fault
315,82 → 352,77
* because special provisions are made to have
* the kernel stack of THREAD locked in DTLB.
*/
stx %g1, [%fp + STACK_BIAS + SAVED_TSTATE]
stx %g2, [%fp + STACK_BIAS + SAVED_TPC]
stx %g3, [%fp + STACK_BIAS + SAVED_TNPC]
stx %g4, [%fp + STACK_BIAS + SAVED_PSTATE]
stx %g1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE]
stx %g2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC]
stx %g3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC]
/*
* Write 0 to TL.
*/
wrpr %g0, 0, %tl
/*
* Alter PSTATE.
* - switch to normal globals.
*/
and %g4, ~(PSTATE_AG_BIT|PSTATE_IG_BIT|PSTATE_MG_BIT), %g4
wrpr %g4, 0, %pstate
/*
* Save the normal globals.
*/
wrpr %g0, PSTATE_PRIV_BIT, %pstate
SAVE_GLOBALS
/*
* Call the higher-level handler.
* Call the higher-level handler and pass istate as second parameter.
*/
mov %fp, %o1 ! calculate istate address
call %l0
add %o1, STACK_BIAS + SAVED_PSTATE, %o1 ! calculate istate address
add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1
 
RESTORE_GLOBALS
wrpr %g0, PSTATE_AG_BIT | PSTATE_PRIV_BIT, %pstate
wrpr %g0, 1, %tl
/*
* Restore the normal global register set.
* Read TSTATE, TPC and TNPC from saved copy.
*/
RESTORE_GLOBALS
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TSTATE], %g1
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TPC], %g2
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC], %g3
 
/*
* Restore PSTATE from saved copy.
* Alternate/Interrupt/MM globals become active.
* Restore TSTATE, TPC and TNPC from saved copies.
*/
ldx [%fp + STACK_BIAS + SAVED_PSTATE], %l4
wrpr %l4, 0, %pstate
wrpr %g1, 0, %tstate
wrpr %g2, 0, %tpc
wrpr %g3, 0, %tnpc
 
/*
* Write 1 to TL.
* If OTHERWIN is zero, then all the userspace windows have been
* spilled to kernel memory (i.e. register window buffer). If
* OTHERWIN is non-zero, then some userspace windows are still
* valid and others might have been spilled. Even so, the CWP pointer
* needs no fixing because the scheduler has not been called.
*/
wrpr %g0, 1, %tl
rdpr %otherwin, %l0
brnz %l0, 0f
nop
 
/*
* Read TSTATE, TPC and TNPC from saved copy.
* OTHERWIN == 0
*/
ldx [%fp + STACK_BIAS + SAVED_TSTATE], %g1
ldx [%fp + STACK_BIAS + SAVED_TPC], %g2
ldx [%fp + STACK_BIAS + SAVED_TNPC], %g3
 
/*
* Do restore to match the save instruction from the top-level handler.
* If TSTATE.CWP + 1 == CWP, then we still do not have to fix CWP.
*/
restore
and %g1, TSTATE_CWP_MASK, %l0
inc %l0
and %l0, TSTATE_CWP_MASK, %l0 ! %l0 mod NWINDOW
rdpr %cwp, %l1
cmp %l0, %l1
bz 0f ! CWP is ok
nop
 
/*
* On execution of the RETRY instruction, CWP will be restored from the TSTATE
* register. However, because of scheduling, it is possible that CWP in the saved
* TSTATE is different from the current CWP. The following chunk of code fixes
* CWP in the saved copy of TSTATE.
* Fix CWP.
*/
rdpr %cwp, %g4 ! read current CWP
and %g1, ~0x1f, %g1 ! clear CWP field in saved TSTATE
or %g1, %g4, %g1 ! write current CWP to TSTATE
mov %fp, %g1
flushw
wrpr %l0, 0, %cwp
mov %g1, %fp
/*
* Restore TSTATE, TPC and TNPC from saved copies.
* OTHERWIN != 0 or fall-through from the OTHERWIN == 0 case.
*/
wrpr %g1, 0, %tstate
wrpr %g2, 0, %tpc
wrpr %g3, 0, %tnpc
/*
* Return from interrupt.
*/
0:
! TODO: restore register windows from register window memory buffer
 
restore
retry
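Note on the OTHERWIN == 0 path above: the handler checks whether (TSTATE.CWP + 1), reduced by TSTATE_CWP_MASK, still equals the current CWP; only on a mismatch, which scheduling can cause, does it pay for a flushw and an explicit %cwp rewrite. The comparison in C (constant from regdef.h above; a sketch, not kernel code):

#include <stdbool.h>

#define TSTATE_CWP_MASK 0x1f

/* True iff the saved TSTATE.CWP no longer sits one window below the
 * current CWP, i.e. RETRY would restore a wrong window pointer. */
static bool cwp_needs_fix(unsigned long saved_tstate, unsigned cur_cwp)
{
	unsigned expected =
	    ((saved_tstate & TSTATE_CWP_MASK) + 1) & TSTATE_CWP_MASK;

	return expected != cur_cwp;
}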
/trunk/kernel/arch/sparc64/src/mm/tlb.c
50,8 → 50,10
#include <arch/asm.h>
#include <symtab.h>
 
static void dtlb_pte_copy(pte_t *t);
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
 
char *context_encoding[] = {
"Primary",
105,14 → 107,96
dtlb_data_in_write(data.value);
}
 
void dtlb_pte_copy(pte_t *t)
/** Copy PTE to TLB.
*
* @param t Page Table Entry to be copied.
* @param ro If true, the entry will be created read-only, regardless of its w field.
*/
void dtlb_pte_copy(pte_t *t, bool ro)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
 
tag.value = 0;
tag.context = t->as->asid;
tag.vpn = pg.vpn;
dtlb_tag_access_write(tag.value);
data.value = 0;
data.v = true;
data.size = PAGESIZE_8K;
data.pfn = fr.pfn;
data.l = false;
data.cp = t->c;
data.cv = t->c;
data.p = t->p;
data.w = ro ? false : t->w;
data.g = t->g;
dtlb_data_in_write(data.value);
}
 
void itlb_pte_copy(pte_t *t)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
 
tag.value = 0;
tag.context = t->as->asid;
tag.vpn = pg.vpn;
itlb_tag_access_write(tag.value);
data.value = 0;
data.v = true;
data.size = PAGESIZE_8K;
data.pfn = fr.pfn;
data.l = false;
data.cp = t->c;
data.cv = t->c;
data.p = t->p;
data.w = false;
data.g = t->g;
itlb_data_in_write(data.value);
}
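Note on both copy routines: they split the virtual and physical addresses into page number and offset through the page_address_t/frame_address_t unions; with 8K pages the VPN is simply the address shifted right by 13 bits. A standalone check of that equivalence (PAGE_WIDTH assumed from the PAGESIZE_8K used above):

#include <assert.h>
#include <stdint.h>

#define PAGE_WIDTH 13  /* 8K pages, as in PAGESIZE_8K above */

int main(void)
{
	uint64_t va = 0x123456789aULL;
	uint64_t vpn = va >> PAGE_WIDTH;            /* what pg.vpn holds */
	uint64_t off = va & ((1ULL << PAGE_WIDTH) - 1);

	assert(((vpn << PAGE_WIDTH) | off) == va);
	return 0;
}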
 
/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
{
panic("%s\n", __FUNCTION__);
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
pte_t *t;
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
if (t && PTE_EXECUTABLE(t)) {
/*
* The mapping was found in the software page hash table.
* Insert it into ITLB.
*/
t->a = true;
itlb_pte_copy(t);
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to the address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
do_fast_instruction_access_mmu_miss_fault(istate, __FUNCTION__);
}
}
}
 
/** DTLB miss handler.
144,7 → 228,8
* The mapping was found in the software page hash table.
* Insert it into DTLB.
*/
dtlb_pte_copy(t);
t->a = true;
dtlb_pte_copy(t, true);
page_table_unlock(AS, true);
} else {
/*
190,6 → 275,14
 
}
 
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str)
{
char *tpc_str = get_symtab_entry(istate->tpc);
 
printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
panic("%s\n", str);
}
 
void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str)
{
tlb_tag_access_reg_t tag;
/trunk/kernel/arch/sparc64/src/start.S
35,8 → 35,6
 
.register %g2, #scratch
.register %g3, #scratch
.register %g6, #scratch
.register %g7, #scratch
 
.section K_TEXT_START, "ax"
 
153,43 → 151,43
* addresses already mapped by the taken over DTLB.
*/
set kernel_image_start, %g7
set kernel_image_start, %g5
! write ITLB tag of context 1
SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
set VA_DMMU_TAG_ACCESS, %g2
stxa %g1, [%g2] ASI_IMMU
flush %g7
flush %g5
 
! write ITLB data and install the temporary mapping in context 1
SET_TLB_DATA(g1, g2, 0) ! use non-global mapping
stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
flush %g7
flush %g5
! switch to context 1
set MEM_CONTEXT_TEMP, %g1
stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi ! ASI_DMMU is correct here !!!
flush %g7
flush %g5
! demap context 0
SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
stxa %g0, [%g1] ASI_IMMU_DEMAP
flush %g7
flush %g5
! write ITLB tag of context 0
SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
set VA_DMMU_TAG_ACCESS, %g2
stxa %g1, [%g2] ASI_IMMU
flush %g7
flush %g5
 
! write ITLB data and install the permanent kernel mapping in context 0
SET_TLB_DATA(g1, g2, 0) ! use non-global mapping
stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG
flush %g7
flush %g5
 
! switch to context 0
stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi ! ASI_DMMU is correct here !!!
flush %g7
flush %g5
 
! ensure nucleus mapping
wrpr %g0, 1, %tl
197,16 → 195,16
! set context 1 in the primary context register
set MEM_CONTEXT_TEMP, %g1
stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi ! ASI_DMMU is correct here !!!
flush %g7
flush %g5
 
! demap context 1
SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY)
stxa %g0, [%g1] ASI_IMMU_DEMAP
flush %g7
flush %g5
! set context 0 in the primary context register
stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi ! ASI_DMMU is correct here !!!
flush %g7
flush %g5
! set TL back to 0
wrpr %g0, 0, %tl