/branches/sparc/kernel/arch/sparc64/src/sun4v/asm.S |
---|
32,20 → 32,6 |
.text |
/*
 * TODO: Remove these stubs as soon as there is a scheduler for sun4v.
 * They are here only to keep the (sun4u) scheduler code linkable.
 * All four symbols alias the same empty routine, which simply returns;
 * read_from_ag_g7 returns whatever value happens to be in %o0.
 *
 * The explicit retl/nop is essential: without it a call to any of these
 * symbols would fall through into the code that follows in this file.
 */
.global write_to_ag_g6
write_to_ag_g6:
.global write_to_ag_g7
write_to_ag_g7:
.global write_to_ig_g6
write_to_ig_g6:
.global read_from_ag_g7
read_from_ag_g7:
	retl
	nop
/** Switch to userspace. |
* |
* %o0 Userspace entry address. |
54,7 → 40,6 |
*/ |
.global switch_to_userspace |
switch_to_userspace: |
#if 0 |
save %o1, -STACK_WINDOW_SAVE_AREA_SIZE, %sp |
flushw |
wrpr %g0, 0, %cleanwin ! avoid information leak |
90,4 → 75,3 |
wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate |
done ! jump to userspace |
#endif |
/branches/sparc/kernel/arch/sparc64/src/sun4v/start.S |
---|
272,5 → 272,10 |
.align MMU_FSA_ALIGNMENT |
.global mmu_fsas |
mmu_fsas: |
!.space (MMU_FSA_SIZE * MAX_NUM_STRANDS) |
.space 8192 |
.space (MMU_FSA_SIZE * MAX_NUM_STRANDS) |
/* area containing kernel stack and uspace window buffer pointers of all CPUs */ |
.align KSTACK_WBUF_PTR_SIZE |
.global kstack_wbuf_ptrs |
kstack_wbuf_ptrs: |
.space (KSTACK_WBUF_PTR_SIZE * MAX_NUM_STRANDS) |
/branches/sparc/kernel/arch/sparc64/src/proc/scheduler.c |
---|
File deleted |
/branches/sparc/kernel/arch/sparc64/src/proc/sun4v/scheduler.c |
---|
0,0 → 1,80 |
/* |
* Copyright (c) 2006 Jakub Jermar |
* Copyright (c) 2009 Pavel Rimsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sparc64proc |
* @{ |
*/ |
/** @file |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <arch.h> |
#include <arch/asm.h> |
#include <arch/stack.h> |
#include <arch/sun4v/cpu.h> |
#include <arch/sun4v/hypercall.h> |
extern kstack_wbuf_ptr kstack_wbuf_ptrs[MAX_NUM_STRANDS]; |
/** Perform sparc64 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void)
{
	/* No sun4v-specific per-task preparation is needed. */
}
/** Perform sparc64 specific steps before scheduling a thread. |
* |
* For userspace threads, initialize pointer to the kernel stack and for the |
* userspace window buffer. |
*/ |
void before_thread_runs_arch(void) |
{ |
if ((THREAD->flags & THREAD_FLAG_USPACE)) { |
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE - |
(STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT)); |
int cpuid = asi_u64_read(ASI_SCRATCHPAD, SCRATCHPAD_CPUID); |
kstack_wbuf_ptrs[cpuid].kstack = sp; |
kstack_wbuf_ptrs[cpuid].wbuf = |
(uintptr_t) THREAD->arch.uspace_window_buffer; |
} |
} |
/** Perform sparc64 specific steps before a thread stops running. */ |
void after_thread_ran_arch(void) |
{ |
if ((THREAD->flags & THREAD_FLAG_USPACE)) { |
/* sample the state of the userspace window buffer */ |
int cpuid = asi_u64_read(ASI_SCRATCHPAD, SCRATCHPAD_CPUID); |
THREAD->arch.uspace_window_buffer = |
(uint8_t *) kstack_wbuf_ptrs[cpuid].wbuf; |
} |
} |
/** @} |
*/ |
/branches/sparc/kernel/arch/sparc64/src/proc/sun4u/scheduler.c |
---|
0,0 → 1,83 |
/* |
* Copyright (c) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sparc64proc |
* @{ |
*/ |
/** @file |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <arch.h> |
#include <arch/asm.h> |
#include <arch/stack.h> |
/** Perform sparc64 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void)
{
	/* No sun4u-specific per-task preparation is needed. */
}
/** Perform sparc64 specific steps before scheduling a thread. |
* |
* For userspace threads, initialize reserved global registers in the alternate |
* and interrupt sets. |
*/ |
void before_thread_runs_arch(void) |
{ |
if ((THREAD->flags & THREAD_FLAG_USPACE)) { |
/* |
* Write kernel stack address to %g6 of the alternate and |
* interrupt global sets. |
* |
* Write pointer to the last item in the userspace window buffer |
* to %g7 in the alternate set. Write to the interrupt %g7 is |
* not necessary because: |
* - spill traps operate only in the alternate global set, |
* - preemptible trap handler switches to alternate globals |
* before it explicitly uses %g7. |
*/ |
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE - |
(STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT)); |
write_to_ig_g6(sp); |
write_to_ag_g6(sp); |
write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer); |
} |
} |
/** Perform sparc64 specific steps before a thread stops running. */ |
void after_thread_ran_arch(void) |
{ |
if ((THREAD->flags & THREAD_FLAG_USPACE)) { |
/* sample the state of the userspace window buffer */ |
THREAD->arch.uspace_window_buffer = (uint8_t *) read_from_ag_g7(); |
} |
} |
/** @} |
*/ |
/branches/sparc/kernel/arch/sparc64/src/trap/sun4v/trap_table.S |
---|
47,6 → 47,8 |
#include <arch/mm/page.h> |
#include <arch/stack.h> |
#include <arch/sun4v/regdef.h> |
#include <arch/sun4v/arch.h> |
#include <arch/sun4v/cpu.h> |
#define TABLE_SIZE TRAP_TABLE_SIZE |
#define ENTRY_SIZE TRAP_TABLE_ENTRY_SIZE |
292,7 → 294,7 |
.org trap_table + TT_FAST_DATA_ACCESS_PROTECTION*ENTRY_SIZE |
.global fast_data_access_protection_handler_tl0 |
fast_data_access_protection_handler_tl0: |
/*FAST_DATA_ACCESS_PROTECTION_HANDLER 0*/ |
FAST_DATA_ACCESS_PROTECTION_HANDLER 0 |
/* TT = 0x80, TL = 0, spill_0_normal handler */ |
.org trap_table + TT_SPILL_0_NORMAL*ENTRY_SIZE |
524,42 → 526,27 |
restored |
.endm |
/* |
* Preemptible trap handler for handling traps from kernel. |
*/ |
.macro PREEMPTIBLE_HANDLER_KERNEL |
#define NOT(x) ((x) == 0) |
/* |
* ASSERT(%tl == 1) |
* Perform all the actions of the preemptible trap handler which are common |
* for trapping from kernel and trapping from userspace, including call of the |
* higher level service routine. |
* |
* Important note: |
* This macro must be inserted between the "2:" and "4:" labels. The |
* inserting code must be aware of the usage of all the registers |
* contained in this macro. |
*/ |
rdpr %tl, %g3 |
cmp %g3, 1 |
be 1f |
nop |
0: ba 0b ! this is for debugging, if we ever get here |
nop ! it will be easy to find |
/* prevent unnecessary CLEANWIN exceptions */ |
wrpr %g0, NWINDOWS - 1, %cleanwin |
1: |
/* |
* Prevent SAVE instruction from causing a spill exception. If the |
* CANSAVE register is zero, explicitly spill register window |
* at CWP + 2. |
*/ |
rdpr %cansave, %g3 |
brnz %g3, 2f |
nop |
INLINE_SPILL %g3, %g4 |
2: |
/* ask for new register window */ |
save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp |
.macro MIDDLE_PART is_syscall |
/* copy higher level routine's address and its argument */ |
mov %g1, %l0 |
.if NOT(\is_syscall) |
mov %g2, %o0 |
.else |
! store the syscall number on the stack as 7th argument |
stx %g2, [%sp + STACK_WINDOW_SAVE_AREA_SIZE + STACK_BIAS + STACK_ARG6] |
.endif |
/* |
* Save TSTATE, TPC and TNPC aside. |
590,9 → 577,18 |
/* g1 -> l1, ..., g7 -> l7 */ |
SAVE_GLOBALS |
.if NOT(\is_syscall) |
/* call higher-level service routine, pass istate as its 2nd parameter */ |
call %l0 |
add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1 |
.else |
/* Call the higher-level syscall handler. */ |
wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT | PSTATE_IE_BIT, %pstate |
call syscall_handler |
nop |
/* copy the value returned by the syscall */ |
mov %o0, %i0 |
.endif |
/* l1 -> g1, ..., l7 -> g7 */ |
RESTORE_GLOBALS |
663,7 → 659,44 |
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5], %i5 |
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6], %i6 |
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7], %i7 |
.endm |
/* |
* Preemptible trap handler for handling traps from kernel. |
*/ |
.macro PREEMPTIBLE_HANDLER_KERNEL |
/* |
* ASSERT(%tl == 1) |
*/ |
rdpr %tl, %g3 |
cmp %g3, 1 |
be 1f |
nop |
0: ba 0b ! this is for debugging, if we ever get here |
nop ! it will be easy to find |
1: |
/* prevent unnecessary CLEANWIN exceptions */ |
wrpr %g0, NWINDOWS - 1, %cleanwin |
/* |
* Prevent SAVE instruction from causing a spill exception. If the |
* CANSAVE register is zero, explicitly spill register window |
* at CWP + 2. |
*/ |
rdpr %cansave, %g3 |
brnz %g3, 2f |
nop |
INLINE_SPILL %g3, %g4 |
2: |
/* ask for new register window */ |
save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp |
MIDDLE_PART 0 |
4: |
/* |
* Prevent RESTORE instruction from causing a fill exception. If the |
681,9 → 714,232 |
retry |
.endm |
/* |
* Spills the window at CWP + 2 to the userspace window buffer. This macro |
* is to be used before doing SAVE when the spill trap is undesirable. |
* |
* Parameters: |
* tmpreg1 global register to be used for scratching purposes |
* tmpreg2 global register to be used for scratching purposes |
* tmpreg3 global register to be used for scratching purposes |
*/ |
.macro INLINE_SPILL_TO_WBUF tmpreg1, tmpreg2, tmpreg3 |
! CWP := CWP + 2 |
rdpr %cwp, \tmpreg2 |
add \tmpreg2, 2, \tmpreg1 |
and \tmpreg1, NWINDOWS - 1, \tmpreg1 ! modulo NWINDOWS |
wrpr \tmpreg1, %cwp |
#define NOT(x) ((x) == 0) |
! spill to userspace window buffer |
SAVE_TO_USPACE_WBUF \tmpreg3, \tmpreg1 |
! CWP := CWP - 2 |
wrpr \tmpreg2, %cwp |
saved |
.endm |
/* |
* Preemptible handler for handling traps from userspace. |
*/ |
.macro PREEMPTIBLE_HANDLER_USPACE is_syscall |
/* |
* One of the ways this handler can be invoked is after a nested MMU trap from |
* either spill_1_normal or fill_1_normal traps. Both of these traps manipulate |
* the CWP register. We deal with the situation by simulating the MMU trap |
* on TL=1 and restart the respective SAVE or RESTORE instruction once the MMU |
* trap is resolved. However, because we are in the wrong window from the |
* perspective of the MMU trap, we need to synchronize CWP with CWP from TL=0. |
*/ |
.if NOT(\is_syscall) |
rdpr %tstate, %g3 |
and %g3, TSTATE_CWP_MASK, %g4 |
wrpr %g4, 0, %cwp ! resynchronize CWP |
.endif |
/* prevent unnecessary CLEANWIN exceptions */ |
wrpr %g0, NWINDOWS - 1, %cleanwin |
/* |
* Prevent SAVE instruction from causing a spill exception. If the |
* CANSAVE register is zero, explicitly spill register window |
* at CWP + 2. |
*/ |
rdpr %cansave, %g3 |
brnz %g3, 2f |
nop |
INLINE_SPILL_TO_WBUF %g3, %g4, %g7 |
2: |
get_kstack_wbuf_ptr %g3, %g4 |
ldx [%g4], %g6 |
save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp |
.if \is_syscall |
/* Copy arguments for the syscall to the new window. */ |
mov %i0, %o0 |
mov %i1, %o1 |
mov %i2, %o2 |
mov %i3, %o3 |
mov %i4, %o4 |
mov %i5, %o5 |
.endif |
mov VA_PRIMARY_CONTEXT_REG, %l0 |
stxa %g0, [%l0] ASI_PRIMARY_CONTEXT_REG |
rd %pc, %l0 |
flush %l0 |
/* Mark the CANRESTORE windows as OTHER windows. */ |
rdpr %canrestore, %l0 |
wrpr %l0, %otherwin |
wrpr %g0, %canrestore |
/* |
* Other window spills will go to the userspace window buffer |
* and normal spills will go to the kernel stack. |
*/ |
wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(0), %wstate |
MIDDLE_PART \is_syscall |
4: |
/* |
* Spills and fills will be processed by the {spill,fill}_1_normal |
* handlers. |
*/ |
wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate |
/* |
* Set primary context according to secondary context. |
*/ |
wr %g0, ASI_SECONDARY_CONTEXT_REG, %asi |
ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1 |
wr %g0, ASI_PRIMARY_CONTEXT_REG, %asi |
stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi |
rd %pc, %g1 |
flush %g1 |
/* Restoring userspace windows: */ |
/* Save address of the userspace window buffer to the %g7 register. */ |
get_kstack_wbuf_ptr %g1, %g5 |
ldx [%g5 + 8], %g7 |
rdpr %cwp, %g1 |
rdpr %otherwin, %g2 |
/* |
* Skip all OTHERWIN windows and descend to the first window |
* in the userspace window buffer. |
*/ |
sub %g1, %g2, %g3 |
dec %g3 |
and %g3, NWINDOWS - 1, %g3 |
wrpr %g3, 0, %cwp |
/* |
* CWP is now in the window last saved in the userspace window buffer. |
* Fill all windows stored in the buffer. |
*/ |
clr %g4 |
5: andcc %g7, UWB_ALIGNMENT - 1, %g0 ! alignment check |
bz 6f ! %g7 is UWB_ALIGNMENT-aligned, no more windows to refill |
nop |
add %g7, -STACK_WINDOW_SAVE_AREA_SIZE, %g7 |
ldx [%g7 + L0_OFFSET], %l0 |
ldx [%g7 + L1_OFFSET], %l1 |
ldx [%g7 + L2_OFFSET], %l2 |
ldx [%g7 + L3_OFFSET], %l3 |
ldx [%g7 + L4_OFFSET], %l4 |
ldx [%g7 + L5_OFFSET], %l5 |
ldx [%g7 + L6_OFFSET], %l6 |
ldx [%g7 + L7_OFFSET], %l7 |
ldx [%g7 + I0_OFFSET], %i0 |
ldx [%g7 + I1_OFFSET], %i1 |
ldx [%g7 + I2_OFFSET], %i2 |
ldx [%g7 + I3_OFFSET], %i3 |
ldx [%g7 + I4_OFFSET], %i4 |
ldx [%g7 + I5_OFFSET], %i5 |
ldx [%g7 + I6_OFFSET], %i6 |
ldx [%g7 + I7_OFFSET], %i7 |
dec %g3 |
and %g3, NWINDOWS - 1, %g3 |
wrpr %g3, 0, %cwp ! switch to the preceding window |
ba 5b |
inc %g4 |
6: |
/* Save changes of the address of the userspace window buffer. */ |
stx %g7, [%g5 + 8] |
/* |
* Switch back to the proper current window and adjust |
* OTHERWIN, CANRESTORE, CANSAVE and CLEANWIN. |
*/ |
wrpr %g1, 0, %cwp |
add %g4, %g2, %g2 |
cmp %g2, NWINDOWS - 2 |
bg 8f ! fix the CANRESTORE=NWINDOWS-1 anomaly |
mov NWINDOWS - 2, %g1 ! use delay slot for both cases |
sub %g1, %g2, %g1 |
wrpr %g0, 0, %otherwin |
wrpr %g1, 0, %cansave ! NWINDOWS - 2 - CANRESTORE |
wrpr %g2, 0, %canrestore ! OTHERWIN + windows in the buffer |
wrpr %g2, 0, %cleanwin ! avoid information leak |
7: |
restore |
.if \is_syscall |
done |
.else |
retry |
.endif |
8: |
/* |
* We got here in order to avoid inconsistency of the window state registers. |
* If the: |
* |
* save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp |
* |
* instruction trapped and spilled a register window into the userspace |
* window buffer, we have just restored NWINDOWS - 1 register windows. |
* However, CANRESTORE can be only NWINDOWS - 2 at most. |
* |
* The solution is to manually switch to (CWP - 1) mod NWINDOWS |
* and set the window state registers so that: |
* |
* CANRESTORE = NWINDOWS - 2 |
* CLEANWIN = NWINDOWS - 2 |
* CANSAVE = 0 |
* OTHERWIN = 0 |
* |
* The RESTORE instruction is therefore to be skipped. |
*/ |
wrpr %g0, 0, %otherwin |
wrpr %g0, 0, %cansave |
wrpr %g1, 0, %canrestore |
wrpr %g1, 0, %cleanwin |
rdpr %cwp, %g1 |
dec %g1 |
and %g1, NWINDOWS - 1, %g1 |
wrpr %g1, 0, %cwp ! CWP-- |
.if \is_syscall |
done |
.else |
retry |
.endif |
.endm |
/* Preemptible trap handler for TL=1. |
* |
* This trap handler makes arrangements to make calling of scheduler() from |
691,7 → 947,19 |
* handlers. |
*/ |
.macro PREEMPTIBLE_HANDLER_TEMPLATE is_syscall |
rdpr %tstate, %g3 |
and %g3, TSTATE_PRIV_BIT, %g3 |
brz %g3, 100f ! trapping from userspace |
nop |
PREEMPTIBLE_HANDLER_KERNEL |
ba 101f |
nop |
100: |
PREEMPTIBLE_HANDLER_USPACE \is_syscall |
101: |
.endm |
.global preemptible_handler |
/branches/sparc/kernel/arch/sparc64/src/mm/sun4v/tlb.c |
---|
52,20 → 52,21 |
#include <panic.h> |
#include <arch/asm.h> |
#include <arch/cpu.h> |
#include <arch/mm/pagesize.h> |
#ifdef CONFIG_TSB |
#include <arch/mm/tsb.h> |
#endif |
#if 0 |
static void dtlb_pte_copy(pte_t *, index_t, bool); |
static void itlb_pte_copy(pte_t *, index_t); |
static void itlb_pte_copy(pte_t *); |
static void dtlb_pte_copy(pte_t *, bool); |
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *); |
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t, |
static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t, |
const char *); |
static void do_fast_data_access_protection_fault(istate_t *, |
tlb_tag_access_reg_t, const char *); |
uint64_t, const char *); |
#if 0 |
char *context_encoding[] = { |
"Primary", |
"Secondary", |
75,6 → 76,21 |
#endif |
/* |
* The assembly language routine passes a 64-bit parameter to the Data Access |
* MMU Miss and Data Access protection handlers, the parameter encapsulates |
* a virtual address of the faulting page and the faulting context. The most |
* significant 51 bits represent the VA of the faulting page and the least |
significant 13 bits represent the faulting context. The following macros |
* extract the page and context out of the 64-bit parameter: |
*/ |
/* extracts the VA of the faulting page */ |
#define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13) |
/* extracts the faulting context */ |
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff) |
/* |
* Invalidate all non-locked DTLB and ITLB entries. |
*/ |
void tlb_arch_init(void) |
127,90 → 143,66 |
/** Copy PTE to TLB. |
* |
* @param t Page Table Entry to be copied. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
* @param ro If true, the entry will be created read-only, regardless |
* of its w field. |
*/ |
#if 0 |
void dtlb_pte_copy(pte_t *t, index_t index, bool ro) |
void dtlb_pte_copy(pte_t *t, bool ro) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
page_address_t pg; |
frame_address_t fr; |
tte_data_t data; |
pg.address = t->page + (index << MMU_PAGE_WIDTH); |
fr.address = t->frame + (index << MMU_PAGE_WIDTH); |
tag.value = 0; |
tag.context = t->as->asid; |
tag.vpn = pg.vpn; |
dtlb_tag_access_write(tag.value); |
data.value = 0; |
data.v = true; |
data.size = PAGESIZE_8K; |
data.pfn = fr.pfn; |
data.l = false; |
data.nfo = false; |
data.ra = (t->frame) >> FRAME_WIDTH; |
data.ie = false; |
data.e = false; |
data.cp = t->c; |
#ifdef CONFIG_VIRT_IDX_DCACHE |
data.cv = t->c; |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
data.p = t->k; /* p like privileged */ |
#endif |
data.p = t->k; |
data.x = false; |
data.w = ro ? false : t->w; |
data.g = t->g; |
data.size = PAGESIZE_8K; |
dtlb_data_in_write(data.value); |
__hypercall_hyperfast( |
t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR); |
} |
#endif |
/** Copy PTE to ITLB. |
* |
* @param t Page Table Entry to be copied. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
*/ |
#if 0 |
void itlb_pte_copy(pte_t *t, index_t index) |
void itlb_pte_copy(pte_t *t) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
page_address_t pg; |
frame_address_t fr; |
tte_data_t data; |
pg.address = t->page + (index << MMU_PAGE_WIDTH); |
fr.address = t->frame + (index << MMU_PAGE_WIDTH); |
tag.value = 0; |
tag.context = t->as->asid; |
tag.vpn = pg.vpn; |
itlb_tag_access_write(tag.value); |
data.value = 0; |
data.v = true; |
data.size = PAGESIZE_8K; |
data.pfn = fr.pfn; |
data.l = false; |
data.nfo = false; |
data.ra = (t->frame) >> FRAME_WIDTH; |
data.ie = false; |
data.e = false; |
data.cp = t->c; |
data.p = t->k; /* p like privileged */ |
data.cv = false; |
data.p = t->k; |
data.x = true; |
data.w = false; |
data.g = t->g; |
data.size = PAGESIZE_8K; |
itlb_data_in_write(data.value); |
__hypercall_hyperfast( |
t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR); |
} |
#endif |
/** ITLB miss handler. */ |
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate) |
{ |
#if 0 |
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE); |
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE; |
pte_t *t; |
page_table_lock(AS, true); |
t = page_mapping_find(AS, va); |
if (t && PTE_EXECUTABLE(t)) { |
/* |
* The mapping was found in the software page hash table. |
217,9 → 209,9 |
* Insert it into ITLB. |
*/ |
t->a = true; |
itlb_pte_copy(t, index); |
itlb_pte_copy(t); |
#ifdef CONFIG_TSB |
itsb_pte_copy(t, index); |
//itsb_pte_copy(t, index); |
#endif |
page_table_unlock(AS, true); |
} else { |
233,7 → 225,6 |
__func__); |
} |
} |
#endif |
} |
/** DTLB miss handler. |
241,29 → 232,27 |
* Note that some faults (e.g. kernel faults) were already resolved by the |
* low-level, assembly language part of the fast_data_access_mmu_miss handler. |
* |
* @param tag Content of the TLB Tag Access register as it existed |
* when the trap happened. This is to prevent confusion |
* created by clobbered Tag Access register during a nested |
* DTLB miss. |
* @param page_and_ctx A 64-bit value describing the fault. The most |
* significant 51 bits of the value contain the virtual |
* address which caused the fault truncated to the page |
* boundary. The least significant 13 bits of the value |
* contain the number of the context in which the fault |
* occurred. |
* @param istate Interrupted state saved on the stack. |
*/ |
//void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate) |
//{ |
#if 0 |
uintptr_t va; |
index_t index; |
void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate) |
{ |
pte_t *t; |
uintptr_t va = DMISS_ADDRESS(page_and_ctx); |
uint16_t ctx = DMISS_CONTEXT(page_and_ctx); |
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE); |
index = tag.vpn % MMU_PAGES_PER_PAGE; |
if (tag.context == ASID_KERNEL) { |
if (!tag.vpn) { |
if (ctx == ASID_KERNEL) { |
if (va == 0) { |
/* NULL access in kernel */ |
do_fast_data_access_mmu_miss_fault(istate, tag, |
do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, |
__func__); |
} |
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected " |
do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected " |
"kernel page fault."); |
} |
275,9 → 264,9 |
* Insert it into DTLB. |
*/ |
t->a = true; |
dtlb_pte_copy(t, index, true); |
dtlb_pte_copy(t, true); |
#ifdef CONFIG_TSB |
dtsb_pte_copy(t, index, true); |
//dtsb_pte_copy(t, true); |
#endif |
page_table_unlock(AS, true); |
} else { |
287,31 → 276,28 |
*/ |
page_table_unlock(AS, true); |
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
do_fast_data_access_mmu_miss_fault(istate, tag, |
do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, |
__func__); |
} |
} |
#endif |
//} |
} |
/** DTLB protection fault handler. |
* |
* @param tag Content of the TLB Tag Access register as it existed |
* when the trap happened. This is to prevent confusion |
* created by clobbered Tag Access register during a nested |
* DTLB miss. |
* @param page_and_ctx A 64-bit value describing the fault. The most |
* significant 51 bits of the value contain the virtual |
* address which caused the fault truncated to the page |
* boundary. The least significant 13 bits of the value |
* contain the number of the context in which the fault |
* occurred. |
* @param istate Interrupted state saved on the stack. |
*/ |
//void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate) |
//{ |
#if 0 |
uintptr_t va; |
index_t index; |
void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate) |
{ |
pte_t *t; |
uintptr_t va = DMISS_ADDRESS(page_and_ctx); |
uint16_t ctx = DMISS_CONTEXT(page_and_ctx); |
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE); |
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */ |
page_table_lock(AS, true); |
t = page_mapping_find(AS, va); |
if (t && PTE_WRITABLE(t)) { |
322,11 → 308,10 |
*/ |
t->a = true; |
t->d = true; |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, |
va + index * MMU_PAGE_SIZE); |
dtlb_pte_copy(t, index, false); |
mmu_demap_page(va, ctx, MMU_FLAG_DTLB); |
dtlb_pte_copy(t, false); |
#ifdef CONFIG_TSB |
dtsb_pte_copy(t, index, false); |
//dtsb_pte_copy(t, false); |
#endif |
page_table_unlock(AS, true); |
} else { |
336,12 → 321,11 |
*/ |
page_table_unlock(AS, true); |
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { |
do_fast_data_access_protection_fault(istate, tag, |
do_fast_data_access_protection_fault(istate, page_and_ctx, |
__func__); |
} |
} |
#endif |
//} |
} |
/** Print TLB entry (for debugging purposes). |
* |
363,12 → 347,9 |
} |
#endif |
#if defined (US) |
/** Print contents of both TLBs. */ |
void tlb_print(void) |
{ |
#if 0 |
{ |
int i; |
tlb_data_t d; |
tlb_tag_read_reg_t t; |
389,56 → 370,6 |
#endif |
} |
#elif defined (US3) |
/** Print contents of all TLBs. */ |
void tlb_print(void) |
{ |
#if 0 |
int i; |
tlb_data_t d; |
tlb_tag_read_reg_t t; |
printf("TLB_ISMALL contents:\n"); |
for (i = 0; i < tlb_ismall_size(); i++) { |
d.value = dtlb_data_access_read(TLB_ISMALL, i); |
t.value = dtlb_tag_read_read(TLB_ISMALL, i); |
print_tlb_entry(i, t, d); |
} |
printf("TLB_IBIG contents:\n"); |
for (i = 0; i < tlb_ibig_size(); i++) { |
d.value = dtlb_data_access_read(TLB_IBIG, i); |
t.value = dtlb_tag_read_read(TLB_IBIG, i); |
print_tlb_entry(i, t, d); |
} |
printf("TLB_DSMALL contents:\n"); |
for (i = 0; i < tlb_dsmall_size(); i++) { |
d.value = dtlb_data_access_read(TLB_DSMALL, i); |
t.value = dtlb_tag_read_read(TLB_DSMALL, i); |
print_tlb_entry(i, t, d); |
} |
printf("TLB_DBIG_1 contents:\n"); |
for (i = 0; i < tlb_dbig_size(); i++) { |
d.value = dtlb_data_access_read(TLB_DBIG_0, i); |
t.value = dtlb_tag_read_read(TLB_DBIG_0, i); |
print_tlb_entry(i, t, d); |
} |
printf("TLB_DBIG_2 contents:\n"); |
for (i = 0; i < tlb_dbig_size(); i++) { |
d.value = dtlb_data_access_read(TLB_DBIG_1, i); |
t.value = dtlb_tag_read_read(TLB_DBIG_1, i); |
print_tlb_entry(i, t, d); |
} |
#endif |
} |
#endif |
#if 0 |
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, |
const char *str) |
{ |
446,81 → 377,35 |
dump_istate(istate); |
panic("%s\n", str); |
} |
#endif |
#if 0 |
void do_fast_data_access_mmu_miss_fault(istate_t *istate, |
tlb_tag_access_reg_t tag, const char *str) |
uint64_t page_and_ctx, const char *str) |
{ |
uintptr_t va; |
va = tag.vpn << MMU_PAGE_WIDTH; |
if (tag.context) { |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, |
tag.context); |
if (DMISS_CONTEXT(page_and_ctx)) { |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx), |
DMISS_CONTEXT(page_and_ctx)); |
} |
dump_istate(istate); |
printf("Faulting page: %p, ASID=%d\n", va, tag.context); |
printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx)); |
panic("%s\n", str); |
} |
#endif |
#if 0 |
void do_fast_data_access_protection_fault(istate_t *istate, |
tlb_tag_access_reg_t tag, const char *str) |
uint64_t page_and_ctx, const char *str) |
{ |
uintptr_t va; |
va = tag.vpn << MMU_PAGE_WIDTH; |
if (tag.context) { |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, |
tag.context); |
if (DMISS_CONTEXT(page_and_ctx)) { |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx), |
DMISS_CONTEXT(page_and_ctx)); |
} |
printf("Faulting page: %p, ASID=%d\n", va, tag.context); |
printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx)); |
dump_istate(istate); |
panic("%s\n", str); |
} |
#endif |
void describe_mmu_fault(void) |
{ |
} |
#if defined (US3) |
/** Invalidates given TLB entry if and only if it is non-locked or global. |
* |
* @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1, |
* TLB_ISMALL, TLB_IBIG). |
* @param entry Entry index within the given TLB. |
*/ |
#if 0 |
static void tlb_invalidate_entry(int tlb, index_t entry) |
{ |
tlb_data_t d; |
tlb_tag_read_reg_t t; |
if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) { |
d.value = dtlb_data_access_read(tlb, entry); |
if (!d.l || d.g) { |
t.value = dtlb_tag_read_read(tlb, entry); |
d.v = false; |
dtlb_tag_access_write(t.value); |
dtlb_data_access_write(tlb, entry, d.value); |
} |
} else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) { |
d.value = itlb_data_access_read(tlb, entry); |
if (!d.l || d.g) { |
t.value = itlb_tag_read_read(tlb, entry); |
d.v = false; |
itlb_tag_access_write(t.value); |
itlb_data_access_write(tlb, entry, d.value); |
} |
} |
} |
#endif |
#endif |
/** Invalidate all unlocked ITLB and DTLB entries. */ |
void tlb_invalidate_all(void) |
{ |
/branches/sparc/kernel/arch/sparc64/src/sun4u/start.S |
---|
283,7 → 283,6 |
or %sp, %lo(temporary_boot_stack), %sp |
sub %sp, STACK_BIAS, %sp |
sethi 0x42142, %g0 |
sethi %hi(bootinfo), %o0 |
call memcpy ! copy bootinfo |
or %o0, %lo(bootinfo), %o0 |