/branches/sparc/kernel/generic/src/main/kinit.c |
---|
156,7 → 156,6 |
#endif /* CONFIG_KCONSOLE */ |
interrupts_enable(); |
/* |
* Create user tasks, load RAM disk images. |
*/ |
201,7 → 200,6 |
program_ready(&programs[i]); |
} |
} |
#ifdef CONFIG_KCONSOLE |
if (!stdin) { |
printf("kinit: No stdin\nKernel alive: "); |
/branches/sparc/kernel/generic/src/mm/as.c |
---|
1895,11 → 1895,12 |
unative_t sys_as_area_create(uintptr_t address, size_t size, int flags) |
{ |
if (as_area_create(AS, flags | AS_AREA_CACHEABLE, size, address, |
AS_AREA_ATTR_NONE, &anon_backend, NULL)) |
AS_AREA_ATTR_NONE, &anon_backend, NULL)) { |
return (unative_t) address; |
else |
} else { |
return (unative_t) -1; |
} |
} |
/** Wrapper for as_area_resize(). */ |
unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags) |
/branches/sparc/kernel/arch/sparc64/include/sun4v/asm.h |
---|
0,0 → 1,41 |
/* |
* Copyright (c) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sparc64 |
* @{ |
*/ |
/** @file |
*/ |
#ifndef KERN_sparc64_sun4v_ASM_H_ |
#define KERN_sparc64_sun4v_ASM_H_ |
#endif |
/** @} |
*/ |
/branches/sparc/kernel/arch/sparc64/include/sun4v/cpu.h |
---|
35,9 → 35,29 |
#ifndef KERN_sparc64_sun4v_CPU_H_ |
#define KERN_sparc64_sun4v_CPU_H_ |
/* Maximum number of virtual processors. */ |
#define MAX_NUM_STRANDS 64 |
/* |
 * Size of the pair of pointers (one pointer to the kernel stack, |
 * one to the uspace window buffer). |
*/ |
#define KSTACK_WBUF_PTR_SIZE (2 * 8) |
#ifndef __ASM__ |
/** |
 * A pair of pointers: the first one points to the kernel stack of |
 * a userspace thread, the second one points to the userspace window |
 * buffer of that thread. For each CPU there exists exactly |
 * one instance of this structure. |
*/ |
typedef struct { |
uintptr_t kstack; |
uintptr_t wbuf; |
} __attribute__ ((packed)) kstack_wbuf_ptr; |
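/* |
 * Layout note (inferred from the definitions above): kstack is stored at |
 * offset 0 and wbuf at offset 8, so each instance occupies 16 bytes, |
 * matching KSTACK_WBUF_PTR_SIZE as used by the assembly macros below. |
 */ |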
typedef struct { |
uint64_t id; /**< virtual processor ID */ |
uint32_t mid; // TODO: left here only to keep the code compilable!!! |
uint32_t clock_frequency; /**< Processor frequency in Hz. */ |
46,9 → 66,32 |
matches this value. */ |
} cpu_arch_t; |
/* Maximum number of virtual processors. */ |
#define MAX_NUM_STRANDS 64 |
#endif |
#ifdef __ASM__ |
/* |
* Computes the pointer to the kstack_wbuf_ptr structure of the current CPU. |
* |
* Parameters: |
* tmpreg1 global register to be used for scratching purposes |
* result register where the resulting pointer will be saved |
*/ |
.macro get_kstack_wbuf_ptr tmpreg1, result |
! load CPUID to tmpreg1 |
or %g0, SCRATCHPAD_CPUID, \tmpreg1 |
ldxa [\tmpreg1] ASI_SCRATCHPAD, \tmpreg1 |
! compute offset within the array of kstack_wbuf_ptr structures (each |
! such structure is 16 bytes long) |
mulx \tmpreg1, KSTACK_WBUF_PTR_SIZE, \tmpreg1 |
! compute the pointer to the structure for the current CPU |
sethi %hi(kstack_wbuf_ptrs), \result |
or \result, %lo(kstack_wbuf_ptrs), \result |
add \result, \tmpreg1, \result |
.endm |
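/* |
 * Illustrative use (mirrors the sun4v trap table code): load the kernel |
 * stack pointer of the current CPU into %g6, clobbering %g3 and %g4: |
 * |
 *	get_kstack_wbuf_ptr %g3, %g4 |
 *	ldx [%g4], %g6		! kstack is stored at offset 0 |
 */ |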
#endif |
#endif |
/branches/sparc/kernel/arch/sparc64/include/asm.h |
---|
433,14 → 433,15 |
extern void cpu_sleep(void); |
extern void asm_delay_loop(const uint32_t usec); |
extern uint64_t read_from_ag_g7(void); |
extern void write_to_ag_g6(uint64_t val); |
extern void write_to_ag_g7(uint64_t val); |
extern void write_to_ig_g6(uint64_t val); |
extern void switch_to_userspace(uint64_t pc, uint64_t sp, uint64_t uarg); |
#if defined(SUN4U) |
#include <arch/sun4u/asm.h> |
#elif defined (SUN4V) |
#include <arch/sun4v/asm.h> |
#endif |
#endif |
/** @} |
*/ |
/branches/sparc/kernel/arch/sparc64/include/trap/sun4v/regwin.h |
---|
0,0 → 1,86 |
/* |
* Copyright (c) 2005 Jakub Jermar |
* Copyright (c) 2009 Pavel Rimsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sparc64interrupt |
* @{ |
*/ |
#ifndef KERN_sparc64_sun4v_REGWIN_H_ |
#define KERN_sparc64_sun4v_REGWIN_H_ |
#ifdef __ASM__ |
/* |
* Saves the contents of the current window to the userspace window buffer. |
 * Does not modify any register window registers, but updates the pointer |
 * to the top of the userspace window buffer. |
* |
* Parameters: |
* \tmpreg1 global register to be used for scratching purposes |
* \tmpreg2 global register to be used for scratching purposes |
*/ |
.macro SAVE_TO_USPACE_WBUF tmpreg1, tmpreg2 |
get_kstack_wbuf_ptr \tmpreg1, \tmpreg2 |
ldx [\tmpreg2 + 8], \tmpreg1 |
stx %l0, [\tmpreg1 + L0_OFFSET] |
stx %l1, [\tmpreg1 + L1_OFFSET] |
stx %l2, [\tmpreg1 + L2_OFFSET] |
stx %l3, [\tmpreg1 + L3_OFFSET] |
stx %l4, [\tmpreg1 + L4_OFFSET] |
stx %l5, [\tmpreg1 + L5_OFFSET] |
stx %l6, [\tmpreg1 + L6_OFFSET] |
stx %l7, [\tmpreg1 + L7_OFFSET] |
stx %i0, [\tmpreg1 + I0_OFFSET] |
stx %i1, [\tmpreg1 + I1_OFFSET] |
stx %i2, [\tmpreg1 + I2_OFFSET] |
stx %i3, [\tmpreg1 + I3_OFFSET] |
stx %i4, [\tmpreg1 + I4_OFFSET] |
stx %i5, [\tmpreg1 + I5_OFFSET] |
stx %i6, [\tmpreg1 + I6_OFFSET] |
stx %i7, [\tmpreg1 + I7_OFFSET] |
add \tmpreg1, STACK_WINDOW_SAVE_AREA_SIZE, \tmpreg1 |
stx \tmpreg1, [\tmpreg2 + 8] |
.endm |
/* |
* Macro used to spill userspace window to userspace window buffer. |
* It is triggered from normal kernel code doing SAVE when |
* OTHERWIN>0 at (TL=0). |
*/ |
.macro SPILL_TO_USPACE_WINDOW_BUFFER |
SAVE_TO_USPACE_WBUF %g7, %g4 |
saved |
retry |
.endm |
#endif |
#endif |
/** @} |
*/ |
/branches/sparc/kernel/arch/sparc64/include/trap/sun4v/mmu.h |
---|
76,8 → 76,6 |
* Handler of the Fast Data Access MMU Miss trap. If the trap occurred in the kernel |
* (context 0), an identity mapping (with displacement) is installed. Otherwise |
* a higher level service routine is called. |
* |
* TODO implement calling the higher level service routine |
*/ |
.macro FAST_DATA_ACCESS_MMU_MISS_HANDLER tl |
84,11 → 82,9 |
mov SCRATCHPAD_MMU_FSA, %g1 |
ldxa [%g1] ASI_SCRATCHPAD, %g1 ! g1 <= RA of MMU fault status area |
/* service by higher-level routine when context != 0 */ |
/* read faulting context */ |
add %g1, FSA_DFC_OFFSET, %g2 ! g2 <= RA of data fault context |
ldxa [%g2] ASI_REAL, %g3 ! read the fault context |
brnz %g3, 0f |
nop |
/* read the faulting address */ |
add %g1, FSA_DFA_OFFSET, %g2 ! g2 <= RA of data fault address |
96,19 → 92,80 |
srlx %g1, TTE_DATA_TADDR_OFFSET, %g1 ! truncate it to page boundary |
sllx %g1, TTE_DATA_TADDR_OFFSET, %g1 |
/* service by higher-level routine when context != 0 */ |
brnz %g3, 0f |
nop |
/* exclude page number 0 from installing the identity mapping */ |
brz %g1, 0f |
nop |
/* installing the identity does not fit into 32 instructions, call a separate routine */ |
/* |
 * Installing the identity mapping does not fit into 32 instructions, so |
 * call a separate routine. The routine performs RETRY, hence the call never |
* returns. |
*/ |
ba install_identity_mapping |
nop |
0: ! TODO - call higher level service routine |
0: |
/* |
 * One of the scenarios in which this trap can occur is when the |
 * register window spill/fill handler accesses memory which is not |
 * mapped. In such a case, this handler will be called from TL = 1. |
 * We handle the situation by pretending that the MMU miss occurred |
 * on TL = 0. Once the MMU miss trap is serviced, the instruction which |
 * caused the spill/fill trap is restarted, the spill/fill trap occurs |
 * again, but this time its handler accesses memory which IS mapped. |
*/ |
.if (\tl > 0) |
wrpr %g0, 1, %tl |
.endif |
/* |
* Save the faulting virtual page and faulting context to the %g2 |
* register. The most significant 51 bits of the %g2 register will |
* contain the virtual address which caused the fault truncated to the |
* page boundary. The least significant 13 bits of the %g2 register |
* will contain the number of the context in which the fault occurred. |
* The value of the %g2 register will be passed as a parameter to the |
* higher level service routine. |
*/ |
or %g1, %g3, %g2 |
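/* |
 * Illustrative values (not part of the original change): with a page-aligned |
 * fault address %g1 == 0x40a02000 and fault context %g3 == 3, the OR above |
 * losslessly packs them into %g2 == 0x40a02003. |
 */ |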
PREEMPTIBLE_HANDLER fast_data_access_mmu_miss |
.endm |
/* |
 * Handler of the Fast Data Access Protection trap. Finds the trapping address |
 * and context and calls the higher-level service routine. |
*/ |
.macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl |
/* |
* The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER. |
*/ |
.if (\tl > 0) |
wrpr %g0, 1, %tl |
.endif |
mov SCRATCHPAD_MMU_FSA, %g1 |
ldxa [%g1] ASI_SCRATCHPAD, %g1 ! g1 <= RA of MMU fault status area |
/* read faulting context */ |
add %g1, FSA_DFC_OFFSET, %g2 ! g2 <= RA of data fault context |
ldxa [%g2] ASI_REAL, %g3 ! read the fault context |
/* read the faulting address */ |
add %g1, FSA_DFA_OFFSET, %g2 ! g2 <= RA of data fault address |
ldxa [%g2] ASI_REAL, %g1 ! read the fault address |
srlx %g1, TTE_DATA_TADDR_OFFSET, %g1 ! truncate it to page boundary |
sllx %g1, TTE_DATA_TADDR_OFFSET, %g1 |
/* the same as for FAST_DATA_ACCESS_MMU_MISS_HANDLER */ |
or %g1, %g3, %g2 |
PREEMPTIBLE_HANDLER fast_data_access_protection |
.endm |
#endif /* __ASM__ */ |
/branches/sparc/kernel/arch/sparc64/include/trap/regwin.h |
---|
130,35 → 130,6 |
.endm |
/* |
* Macro used to spill userspace window to userspace window buffer. |
* It can be either triggered from preemptible_handler doing SAVE |
* at (TL=1) or from normal kernel code doing SAVE when OTHERWIN>0 |
* at (TL=0). |
*/ |
.macro SPILL_TO_USPACE_WINDOW_BUFFER |
stx %l0, [%g7 + L0_OFFSET] |
stx %l1, [%g7 + L1_OFFSET] |
stx %l2, [%g7 + L2_OFFSET] |
stx %l3, [%g7 + L3_OFFSET] |
stx %l4, [%g7 + L4_OFFSET] |
stx %l5, [%g7 + L5_OFFSET] |
stx %l6, [%g7 + L6_OFFSET] |
stx %l7, [%g7 + L7_OFFSET] |
stx %i0, [%g7 + I0_OFFSET] |
stx %i1, [%g7 + I1_OFFSET] |
stx %i2, [%g7 + I2_OFFSET] |
stx %i3, [%g7 + I3_OFFSET] |
stx %i4, [%g7 + I4_OFFSET] |
stx %i5, [%g7 + I5_OFFSET] |
stx %i6, [%g7 + I6_OFFSET] |
stx %i7, [%g7 + I7_OFFSET] |
add %g7, STACK_WINDOW_SAVE_AREA_SIZE, %g7 |
saved |
retry |
.endm |
/* |
* Macro used by the nucleus and the primary context 0 during normal fills. |
*/ |
.macro FILL_NORMAL_HANDLER_KERNEL |
231,7 → 202,13 |
.endm |
#endif /* __ASM__ */ |
#if defined (SUN4U) |
#include <arch/trap/sun4u/regwin.h> |
#elif defined (SUN4V) |
#include <arch/trap/sun4v/regwin.h> |
#endif |
#endif |
/** @} |
*/ |
/branches/sparc/kernel/arch/sparc64/include/trap/sun4u/regwin.h |
---|
0,0 → 1,70 |
/* |
* Copyright (c) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sparc64interrupt |
* @{ |
*/ |
#ifndef KERN_sparc64_sun4u_REGWIN_H_ |
#define KERN_sparc64_sun4u_REGWIN_H_ |
#ifdef __ASM__ |
/* |
* Macro used to spill userspace window to userspace window buffer. |
* It can be either triggered from preemptible_handler doing SAVE |
* at (TL=1) or from normal kernel code doing SAVE when OTHERWIN>0 |
* at (TL=0). |
*/ |
.macro SPILL_TO_USPACE_WINDOW_BUFFER |
stx %l0, [%g7 + L0_OFFSET] |
stx %l1, [%g7 + L1_OFFSET] |
stx %l2, [%g7 + L2_OFFSET] |
stx %l3, [%g7 + L3_OFFSET] |
stx %l4, [%g7 + L4_OFFSET] |
stx %l5, [%g7 + L5_OFFSET] |
stx %l6, [%g7 + L6_OFFSET] |
stx %l7, [%g7 + L7_OFFSET] |
stx %i0, [%g7 + I0_OFFSET] |
stx %i1, [%g7 + I1_OFFSET] |
stx %i2, [%g7 + I2_OFFSET] |
stx %i3, [%g7 + I3_OFFSET] |
stx %i4, [%g7 + I4_OFFSET] |
stx %i5, [%g7 + I5_OFFSET] |
stx %i6, [%g7 + I6_OFFSET] |
stx %i7, [%g7 + I7_OFFSET] |
add %g7, STACK_WINDOW_SAVE_AREA_SIZE, %g7 |
saved |
retry |
.endm |
#endif |
#endif |
/** @} |
*/ |
/branches/sparc/kernel/arch/sparc64/include/mm/sun4v/tlb.h |
---|
187,6 → 187,8 |
} |
extern void fast_instruction_access_mmu_miss(unative_t, istate_t *); |
extern void fast_data_access_mmu_miss(unative_t, istate_t *); |
extern void fast_data_access_protection(unative_t, istate_t *); |
extern void dtlb_insert_mapping(uintptr_t, uintptr_t, int, bool, bool); |
/branches/sparc/kernel/arch/sparc64/include/sun4u/asm.h |
---|
0,0 → 1,46 |
/* |
* Copyright (c) 2005 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sparc64 |
* @{ |
*/ |
/** @file |
*/ |
#ifndef KERN_sparc64_sun4u_ASM_H_ |
#define KERN_sparc64_sun4u_ASM_H_ |
extern uint64_t read_from_ag_g7(void); |
extern void write_to_ag_g6(uint64_t val); |
extern void write_to_ag_g7(uint64_t val); |
extern void write_to_ig_g6(uint64_t val); |
#endif |
/** @} |
*/ |
/branches/sparc/kernel/arch/sparc64/Makefile.inc |
---|
112,6 → 112,7 |
arch/$(ARCH)/src/mm/$(USARCH)/tlb.c \ |
arch/$(ARCH)/src/mm/$(USARCH)/as.c \ |
arch/$(ARCH)/src/cpu/$(USARCH)/cpu.c \ |
arch/$(ARCH)/src/proc/$(USARCH)/scheduler.c |
ifeq ($(CONFIG_TSB),y) |
ARCH_SOURCES += \ |
131,7 → 132,6 |
arch/$(ARCH)/src/mm/cache.S \ |
arch/$(ARCH)/src/mm/frame.c \ |
arch/$(ARCH)/src/mm/page.c \ |
arch/$(ARCH)/src/proc/scheduler.c \ |
arch/$(ARCH)/src/proc/thread.c \ |
arch/$(ARCH)/src/trap/trap.c \ |
arch/$(ARCH)/src/trap/exception.c \ |
/branches/sparc/kernel/arch/sparc64/src/sun4v/asm.S |
---|
32,20 → 32,6 |
.text |
/* TODO: remove this as soon as there is a scheduler for sun4v. It is here only to make the code compilable. */ |
.global write_to_ag_g6 |
write_to_ag_g6: |
.global write_to_ag_g7 |
write_to_ag_g7: |
.global write_to_ig_g6 |
write_to_ig_g6: |
.global read_from_ag_g7 |
read_from_ag_g7: |
/** Switch to userspace. |
* |
* %o0 Userspace entry address. |
54,7 → 40,6 |
*/ |
.global switch_to_userspace |
switch_to_userspace: |
#if 0 |
save %o1, -STACK_WINDOW_SAVE_AREA_SIZE, %sp |
flushw |
wrpr %g0, 0, %cleanwin ! avoid information leak |
90,4 → 75,3 |
wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate |
done ! jump to userspace |
#endif |
/branches/sparc/kernel/arch/sparc64/src/sun4v/start.S |
---|
272,5 → 272,10 |
.align MMU_FSA_ALIGNMENT |
.global mmu_fsas |
mmu_fsas: |
!.space (MMU_FSA_SIZE * MAX_NUM_STRANDS) |
.space 8192 |
.space (MMU_FSA_SIZE * MAX_NUM_STRANDS) |
/* area containing kernel stack and uspace window buffer pointers of all CPUs */ |
.align KSTACK_WBUF_PTR_SIZE |
.global kstack_wbuf_ptrs |
kstack_wbuf_ptrs: |
.space (KSTACK_WBUF_PTR_SIZE * MAX_NUM_STRANDS) |
/branches/sparc/kernel/arch/sparc64/src/proc/scheduler.c |
---|
File deleted |
/branches/sparc/kernel/arch/sparc64/src/proc/sun4v/scheduler.c |
---|
0,0 → 1,80 |
/* |
* Copyright (c) 2006 Jakub Jermar |
* Copyright (c) 2009 Pavel Rimsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sparc64proc |
* @{ |
*/ |
/** @file |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <arch.h> |
#include <arch/asm.h> |
#include <arch/stack.h> |
#include <arch/sun4v/cpu.h> |
#include <arch/sun4v/hypercall.h> |
extern kstack_wbuf_ptr kstack_wbuf_ptrs[MAX_NUM_STRANDS]; |
/** Perform sparc64 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void) |
{ |
} |
/** Perform sparc64 specific steps before scheduling a thread. |
* |
 * For userspace threads, initialize the pointers to the kernel stack and |
 * to the userspace window buffer. |
*/ |
void before_thread_runs_arch(void) |
{ |
if ((THREAD->flags & THREAD_FLAG_USPACE)) { |
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE - |
(STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT)); |
int cpuid = asi_u64_read(ASI_SCRATCHPAD, SCRATCHPAD_CPUID); |
kstack_wbuf_ptrs[cpuid].kstack = sp; |
kstack_wbuf_ptrs[cpuid].wbuf = |
(uintptr_t) THREAD->arch.uspace_window_buffer; |
} |
} |
/** Perform sparc64 specific steps before a thread stops running. */ |
void after_thread_ran_arch(void) |
{ |
if ((THREAD->flags & THREAD_FLAG_USPACE)) { |
/* sample the state of the userspace window buffer */ |
int cpuid = asi_u64_read(ASI_SCRATCHPAD, SCRATCHPAD_CPUID); |
THREAD->arch.uspace_window_buffer = |
(uint8_t *) kstack_wbuf_ptrs[cpuid].wbuf; |
} |
} |
/** @} |
*/ |
/branches/sparc/kernel/arch/sparc64/src/proc/sun4u/scheduler.c |
---|
0,0 → 1,83 |
/* |
* Copyright (c) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sparc64proc |
* @{ |
*/ |
/** @file |
*/ |
#include <proc/scheduler.h> |
#include <proc/thread.h> |
#include <arch.h> |
#include <arch/asm.h> |
#include <arch/stack.h> |
/** Perform sparc64 specific tasks needed before the new task is run. */ |
void before_task_runs_arch(void) |
{ |
} |
/** Perform sparc64 specific steps before scheduling a thread. |
* |
* For userspace threads, initialize reserved global registers in the alternate |
* and interrupt sets. |
*/ |
void before_thread_runs_arch(void) |
{ |
if ((THREAD->flags & THREAD_FLAG_USPACE)) { |
/* |
* Write kernel stack address to %g6 of the alternate and |
* interrupt global sets. |
* |
 * Write the pointer to the last item in the userspace window buffer |
 * to %g7 in the alternate set. Writing to the interrupt %g7 is |
 * not necessary because: |
* - spill traps operate only in the alternate global set, |
* - preemptible trap handler switches to alternate globals |
* before it explicitly uses %g7. |
*/ |
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE - |
(STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT)); |
write_to_ig_g6(sp); |
write_to_ag_g6(sp); |
write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer); |
} |
} |
/** Perform sparc64 specific steps before a thread stops running. */ |
void after_thread_ran_arch(void) |
{ |
if ((THREAD->flags & THREAD_FLAG_USPACE)) { |
/* sample the state of the userspace window buffer */ |
THREAD->arch.uspace_window_buffer = (uint8_t *) read_from_ag_g7(); |
} |
} |
/** @} |
*/ |
/branches/sparc/kernel/arch/sparc64/src/trap/sun4v/trap_table.S |
---|
47,6 → 47,8 |
#include <arch/mm/page.h> |
#include <arch/stack.h> |
#include <arch/sun4v/regdef.h> |
#include <arch/sun4v/arch.h> |
#include <arch/sun4v/cpu.h> |
#define TABLE_SIZE TRAP_TABLE_SIZE |
#define ENTRY_SIZE TRAP_TABLE_ENTRY_SIZE |
292,7 → 294,7 |
.org trap_table + TT_FAST_DATA_ACCESS_PROTECTION*ENTRY_SIZE |
.global fast_data_access_protection_handler_tl0 |
fast_data_access_protection_handler_tl0: |
/*FAST_DATA_ACCESS_PROTECTION_HANDLER 0*/ |
FAST_DATA_ACCESS_PROTECTION_HANDLER 0 |
/* TT = 0x80, TL = 0, spill_0_normal handler */ |
.org trap_table + TT_SPILL_0_NORMAL*ENTRY_SIZE |
524,42 → 526,27 |
restored |
.endm |
/* |
* Preemptible trap handler for handling traps from kernel. |
*/ |
.macro PREEMPTIBLE_HANDLER_KERNEL |
#define NOT(x) ((x) == 0) |
/* |
* ASSERT(%tl == 1) |
 * Perform all the actions of the preemptible trap handler which are common |
 * to trapping from the kernel and trapping from userspace, including the |
 * call to the higher-level service routine. |
 * |
 * Important note: |
 * This macro must be inserted between the "2:" and "4:" labels. The |
 * code into which this macro is inserted must be aware of which |
 * registers this macro uses. |
*/ |
rdpr %tl, %g3 |
cmp %g3, 1 |
be 1f |
nop |
0: ba 0b ! this is for debugging, if we ever get here |
nop ! it will be easy to find |
/* prevent unnecessary CLEANWIN exceptions */ |
wrpr %g0, NWINDOWS - 1, %cleanwin |
1: |
/* |
* Prevent SAVE instruction from causing a spill exception. If the |
* CANSAVE register is zero, explicitly spill register window |
* at CWP + 2. |
*/ |
rdpr %cansave, %g3 |
brnz %g3, 2f |
nop |
INLINE_SPILL %g3, %g4 |
2: |
/* ask for new register window */ |
save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp |
.macro MIDDLE_PART is_syscall |
/* copy higher level routine's address and its argument */ |
mov %g1, %l0 |
.if NOT(\is_syscall) |
mov %g2, %o0 |
.else |
! store the syscall number on the stack as the 7th argument |
stx %g2, [%sp + STACK_WINDOW_SAVE_AREA_SIZE + STACK_BIAS + STACK_ARG6] |
.endif |
/* |
* Save TSTATE, TPC and TNPC aside. |
590,9 → 577,18 |
/* g1 -> l1, ..., g7 -> l7 */ |
SAVE_GLOBALS |
.if NOT(\is_syscall) |
/* call higher-level service routine, pass istate as its 2nd parameter */ |
call %l0 |
add %sp, PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_TNPC, %o1 |
.else |
/* Call the higher-level syscall handler. */ |
wrpr %g0, PSTATE_PRIV_BIT | PSTATE_PEF_BIT | PSTATE_IE_BIT, %pstate |
call syscall_handler |
nop |
/* copy the value returned by the syscall */ |
mov %o0, %i0 |
.endif |
/* l1 -> g1, ..., l7 -> g7 */ |
RESTORE_GLOBALS |
663,7 → 659,44 |
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5], %i5 |
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6], %i6 |
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7], %i7 |
.endm |
/* |
* Preemptible trap handler for handling traps from kernel. |
*/ |
.macro PREEMPTIBLE_HANDLER_KERNEL |
/* |
* ASSERT(%tl == 1) |
*/ |
rdpr %tl, %g3 |
cmp %g3, 1 |
be 1f |
nop |
0: ba 0b ! this is for debugging, if we ever get here |
nop ! it will be easy to find |
1: |
/* prevent unnecessary CLEANWIN exceptions */ |
wrpr %g0, NWINDOWS - 1, %cleanwin |
/* |
* Prevent SAVE instruction from causing a spill exception. If the |
* CANSAVE register is zero, explicitly spill register window |
* at CWP + 2. |
*/ |
rdpr %cansave, %g3 |
brnz %g3, 2f |
nop |
INLINE_SPILL %g3, %g4 |
2: |
/* ask for new register window */ |
save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp |
MIDDLE_PART 0 |
4: |
/* |
* Prevent RESTORE instruction from causing a fill exception. If the |
681,9 → 714,232 |
retry |
.endm |
/* |
* Spills the window at CWP + 2 to the userspace window buffer. This macro |
* is to be used before doing SAVE when the spill trap is undesirable. |
* |
* Parameters: |
* tmpreg1 global register to be used for scratching purposes |
* tmpreg2 global register to be used for scratching purposes |
* tmpreg3 global register to be used for scratching purposes |
*/ |
.macro INLINE_SPILL_TO_WBUF tmpreg1, tmpreg2, tmpreg3 |
! CWP := CWP + 2 |
rdpr %cwp, \tmpreg2 |
add \tmpreg2, 2, \tmpreg1 |
and \tmpreg1, NWINDOWS - 1, \tmpreg1 ! modulo NWINDOWS |
wrpr \tmpreg1, %cwp |
#define NOT(x) ((x) == 0) |
! spill to userspace window buffer |
SAVE_TO_USPACE_WBUF \tmpreg3, \tmpreg1 |
! CWP := CWP - 2 |
wrpr \tmpreg2, %cwp |
saved |
.endm |
/* |
* Preemptible handler for handling traps from userspace. |
*/ |
.macro PREEMPTIBLE_HANDLER_USPACE is_syscall |
/* |
* One of the ways this handler can be invoked is after a nested MMU trap from |
* either spill_1_normal or fill_1_normal traps. Both of these traps manipulate |
* the CWP register. We deal with the situation by simulating the MMU trap |
 * on TL=1 and restarting the respective SAVE or RESTORE instruction once the MMU |
* trap is resolved. However, because we are in the wrong window from the |
* perspective of the MMU trap, we need to synchronize CWP with CWP from TL=0. |
*/ |
.if NOT(\is_syscall) |
rdpr %tstate, %g3 |
and %g3, TSTATE_CWP_MASK, %g4 |
wrpr %g4, 0, %cwp ! resynchronize CWP |
.endif |
/* prevent unnecessary CLEANWIN exceptions */ |
wrpr %g0, NWINDOWS - 1, %cleanwin |
/* |
* Prevent SAVE instruction from causing a spill exception. If the |
* CANSAVE register is zero, explicitly spill register window |
* at CWP + 2. |
*/ |
rdpr %cansave, %g3 |
brnz %g3, 2f |
nop |
INLINE_SPILL_TO_WBUF %g3, %g4, %g7 |
2: |
get_kstack_wbuf_ptr %g3, %g4 |
ldx [%g4], %g6 |
save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp |
.if \is_syscall |
/* Copy arguments for the syscall to the new window. */ |
mov %i0, %o0 |
mov %i1, %o1 |
mov %i2, %o2 |
mov %i3, %o3 |
mov %i4, %o4 |
mov %i5, %o5 |
.endif |
mov VA_PRIMARY_CONTEXT_REG, %l0 |
stxa %g0, [%l0] ASI_PRIMARY_CONTEXT_REG |
rd %pc, %l0 |
flush %l0 |
/* Mark the CANRESTORE windows as OTHER windows. */ |
rdpr %canrestore, %l0 |
wrpr %l0, %otherwin |
wrpr %g0, %canrestore |
/* |
* Other window spills will go to the userspace window buffer |
* and normal spills will go to the kernel stack. |
*/ |
wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(0), %wstate |
MIDDLE_PART \is_syscall |
4: |
/* |
* Spills and fills will be processed by the {spill,fill}_1_normal |
* handlers. |
*/ |
wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(1), %wstate |
/* |
* Set primary context according to secondary context. |
*/ |
wr %g0, ASI_SECONDARY_CONTEXT_REG, %asi |
ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1 |
wr %g0, ASI_PRIMARY_CONTEXT_REG, %asi |
stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi |
rd %pc, %g1 |
flush %g1 |
/* Restoring userspace windows: */ |
/* Save address of the userspace window buffer to the %g7 register. */ |
get_kstack_wbuf_ptr %g1, %g5 |
ldx [%g5 + 8], %g7 |
rdpr %cwp, %g1 |
rdpr %otherwin, %g2 |
/* |
* Skip all OTHERWIN windows and descend to the first window |
* in the userspace window buffer. |
*/ |
sub %g1, %g2, %g3 |
dec %g3 |
and %g3, NWINDOWS - 1, %g3 |
wrpr %g3, 0, %cwp |
/* |
* CWP is now in the window last saved in the userspace window buffer. |
* Fill all windows stored in the buffer. |
*/ |
clr %g4 |
5: andcc %g7, UWB_ALIGNMENT - 1, %g0 ! alignment check |
bz 6f ! %g7 is UWB_ALIGNMENT-aligned, no more windows to refill |
nop |
add %g7, -STACK_WINDOW_SAVE_AREA_SIZE, %g7 |
ldx [%g7 + L0_OFFSET], %l0 |
ldx [%g7 + L1_OFFSET], %l1 |
ldx [%g7 + L2_OFFSET], %l2 |
ldx [%g7 + L3_OFFSET], %l3 |
ldx [%g7 + L4_OFFSET], %l4 |
ldx [%g7 + L5_OFFSET], %l5 |
ldx [%g7 + L6_OFFSET], %l6 |
ldx [%g7 + L7_OFFSET], %l7 |
ldx [%g7 + I0_OFFSET], %i0 |
ldx [%g7 + I1_OFFSET], %i1 |
ldx [%g7 + I2_OFFSET], %i2 |
ldx [%g7 + I3_OFFSET], %i3 |
ldx [%g7 + I4_OFFSET], %i4 |
ldx [%g7 + I5_OFFSET], %i5 |
ldx [%g7 + I6_OFFSET], %i6 |
ldx [%g7 + I7_OFFSET], %i7 |
dec %g3 |
and %g3, NWINDOWS - 1, %g3 |
wrpr %g3, 0, %cwp ! switch to the preceding window |
ba 5b |
inc %g4 |
6: |
/* Save changes of the address of the userspace window buffer. */ |
stx %g7, [%g5 + 8] |
/* |
* Switch back to the proper current window and adjust |
* OTHERWIN, CANRESTORE, CANSAVE and CLEANWIN. |
*/ |
wrpr %g1, 0, %cwp |
add %g4, %g2, %g2 |
cmp %g2, NWINDOWS - 2 |
bg 8f ! fix the CANRESTORE=NWINDOWS-1 anomaly |
mov NWINDOWS - 2, %g1 ! use delay slot for both cases |
sub %g1, %g2, %g1 |
wrpr %g0, 0, %otherwin |
wrpr %g1, 0, %cansave ! NWINDOWS - 2 - CANRESTORE |
wrpr %g2, 0, %canrestore ! OTHERWIN + windows in the buffer |
wrpr %g2, 0, %cleanwin ! avoid information leak |
7: |
restore |
.if \is_syscall |
done |
.else |
retry |
.endif |
8: |
/* |
* We got here in order to avoid inconsistency of the window state registers. |
* If the: |
* |
* save %g6, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp |
* |
* instruction trapped and spilled a register window into the userspace |
* window buffer, we have just restored NWINDOWS - 1 register windows. |
 * However, CANRESTORE can be at most NWINDOWS - 2. |
* |
* The solution is to manually switch to (CWP - 1) mod NWINDOWS |
* and set the window state registers so that: |
* |
* CANRESTORE = NWINDOWS - 2 |
* CLEANWIN = NWINDOWS - 2 |
* CANSAVE = 0 |
* OTHERWIN = 0 |
* |
 * The RESTORE instruction is therefore to be skipped. |
*/ |
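/* |
 * For example, assuming the usual NWINDOWS == 8: all 7 remaining windows |
 * would have been restored, but CANRESTORE may hold at most 6, hence the |
 * manual adjustment below. |
 */ |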
wrpr %g0, 0, %otherwin |
wrpr %g0, 0, %cansave |
wrpr %g1, 0, %canrestore |
wrpr %g1, 0, %cleanwin |
rdpr %cwp, %g1 |
dec %g1 |
and %g1, NWINDOWS - 1, %g1 |
wrpr %g1, 0, %cwp ! CWP-- |
.if \is_syscall |
done |
.else |
retry |
.endif |
.endm |
/* Preemptible trap handler for TL=1. |
* |
* This trap handler makes arrangements to make calling of scheduler() from |
691,7 → 947,19 |
* handlers. |
*/ |
.macro PREEMPTIBLE_HANDLER_TEMPLATE is_syscall |
rdpr %tstate, %g3 |
and %g3, TSTATE_PRIV_BIT, %g3 |
brz %g3, 100f ! trapping from userspace |
nop |
PREEMPTIBLE_HANDLER_KERNEL |
ba 101f |
nop |
100: |
PREEMPTIBLE_HANDLER_USPACE \is_syscall |
101: |
.endm |
.global preemptible_handler |
/branches/sparc/kernel/arch/sparc64/src/mm/sun4v/tlb.c |
---|
52,20 → 52,21 |
#include <panic.h> |
#include <arch/asm.h> |
#include <arch/cpu.h> |
#include <arch/mm/pagesize.h> |
#ifdef CONFIG_TSB |
#include <arch/mm/tsb.h> |
#endif |
#if 0 |
static void dtlb_pte_copy(pte_t *, index_t, bool); |
static void itlb_pte_copy(pte_t *, index_t); |
static void itlb_pte_copy(pte_t *); |
static void dtlb_pte_copy(pte_t *, bool); |
static void do_fast_instruction_access_mmu_miss_fault(istate_t *, const char *); |
static void do_fast_data_access_mmu_miss_fault(istate_t *, tlb_tag_access_reg_t, |
static void do_fast_data_access_mmu_miss_fault(istate_t *, uint64_t, |
const char *); |
static void do_fast_data_access_protection_fault(istate_t *, |
tlb_tag_access_reg_t, const char *); |
uint64_t, const char *); |
#if 0 |
char *context_encoding[] = { |
"Primary", |
"Secondary", |
75,6 → 76,21 |
#endif |
/* |
 * The assembly language routine passes a 64-bit parameter to the Data Access |
 * MMU Miss and Data Access Protection handlers; the parameter encapsulates |
 * the virtual address of the faulting page and the faulting context. The most |
 * significant 51 bits represent the VA of the faulting page and the least |
 * significant 13 bits represent the faulting context. The following macros |
* extract the page and context out of the 64-bit parameter: |
*/ |
/* extracts the VA of the faulting page */ |
#define DMISS_ADDRESS(page_and_ctx) (((page_and_ctx) >> 13) << 13) |
/* extracts the faulting context */ |
#define DMISS_CONTEXT(page_and_ctx) ((page_and_ctx) & 0x1fff) |
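/* |
 * Illustrative example (assumed values, not part of the original change): |
 * for page_and_ctx == 0x40a02003, DMISS_ADDRESS() yields 0x40a02000 (the |
 * page-aligned VA) and DMISS_CONTEXT() yields 3 (the faulting context). |
 */ |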
/* |
* Invalidate all non-locked DTLB and ITLB entries. |
*/ |
void tlb_arch_init(void) |
127,90 → 143,66 |
/** Copy PTE to TLB. |
* |
* @param t Page Table Entry to be copied. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
* @param ro If true, the entry will be created read-only, regardless |
* of its w field. |
*/ |
#if 0 |
void dtlb_pte_copy(pte_t *t, index_t index, bool ro) |
void dtlb_pte_copy(pte_t *t, bool ro) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
page_address_t pg; |
frame_address_t fr; |
tte_data_t data; |
pg.address = t->page + (index << MMU_PAGE_WIDTH); |
fr.address = t->frame + (index << MMU_PAGE_WIDTH); |
tag.value = 0; |
tag.context = t->as->asid; |
tag.vpn = pg.vpn; |
dtlb_tag_access_write(tag.value); |
data.value = 0; |
data.v = true; |
data.size = PAGESIZE_8K; |
data.pfn = fr.pfn; |
data.l = false; |
data.nfo = false; |
data.ra = (t->frame) >> FRAME_WIDTH; |
data.ie = false; |
data.e = false; |
data.cp = t->c; |
#ifdef CONFIG_VIRT_IDX_DCACHE |
data.cv = t->c; |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
data.p = t->k; /* p like privileged */ |
#endif |
data.p = t->k; |
data.x = false; |
data.w = ro ? false : t->w; |
data.g = t->g; |
data.size = PAGESIZE_8K; |
dtlb_data_in_write(data.value); |
__hypercall_hyperfast( |
t->page, t->as->asid, data.value, MMU_FLAG_DTLB, 0, MMU_MAP_ADDR); |
} |
#endif |
/** Copy PTE to ITLB. |
* |
* @param t Page Table Entry to be copied. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
*/ |
#if 0 |
void itlb_pte_copy(pte_t *t, index_t index) |
void itlb_pte_copy(pte_t *t) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
page_address_t pg; |
frame_address_t fr; |
tte_data_t data; |
pg.address = t->page + (index << MMU_PAGE_WIDTH); |
fr.address = t->frame + (index << MMU_PAGE_WIDTH); |
tag.value = 0; |
tag.context = t->as->asid; |
tag.vpn = pg.vpn; |
itlb_tag_access_write(tag.value); |
data.value = 0; |
data.v = true; |
data.size = PAGESIZE_8K; |
data.pfn = fr.pfn; |
data.l = false; |
data.nfo = false; |
data.ra = (t->frame) >> FRAME_WIDTH; |
data.ie = false; |
data.e = false; |
data.cp = t->c; |
data.p = t->k; /* p like privileged */ |
data.cv = false; |
data.p = t->k; |
data.x = true; |
data.w = false; |
data.g = t->g; |
data.size = PAGESIZE_8K; |
itlb_data_in_write(data.value); |
__hypercall_hyperfast( |
t->page, t->as->asid, data.value, MMU_FLAG_ITLB, 0, MMU_MAP_ADDR); |
} |
#endif |
/** ITLB miss handler. */ |
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate) |
{ |
#if 0 |
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE); |
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE; |
pte_t *t; |
page_table_lock(AS, true); |
t = page_mapping_find(AS, va); |
if (t && PTE_EXECUTABLE(t)) { |
/* |
* The mapping was found in the software page hash table. |
217,9 → 209,9 |
* Insert it into ITLB. |
*/ |
t->a = true; |
itlb_pte_copy(t, index); |
itlb_pte_copy(t); |
#ifdef CONFIG_TSB |
itsb_pte_copy(t, index); |
//itsb_pte_copy(t, index); |
#endif |
page_table_unlock(AS, true); |
} else { |
233,7 → 225,6 |
__func__); |
} |
} |
#endif |
} |
/** DTLB miss handler. |
241,29 → 232,27 |
* Note that some faults (e.g. kernel faults) were already resolved by the |
* low-level, assembly language part of the fast_data_access_mmu_miss handler. |
* |
* @param tag Content of the TLB Tag Access register as it existed |
* when the trap happened. This is to prevent confusion |
* created by clobbered Tag Access register during a nested |
* DTLB miss. |
* @param page_and_ctx A 64-bit value describing the fault. The most |
* significant 51 bits of the value contain the virtual |
* address which caused the fault truncated to the page |
* boundary. The least significant 13 bits of the value |
* contain the number of the context in which the fault |
* occurred. |
* @param istate Interrupted state saved on the stack. |
*/ |
//void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate) |
//{ |
#if 0 |
uintptr_t va; |
index_t index; |
void fast_data_access_mmu_miss(uint64_t page_and_ctx, istate_t *istate) |
{ |
pte_t *t; |
uintptr_t va = DMISS_ADDRESS(page_and_ctx); |
uint16_t ctx = DMISS_CONTEXT(page_and_ctx); |
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE); |
index = tag.vpn % MMU_PAGES_PER_PAGE; |
if (tag.context == ASID_KERNEL) { |
if (!tag.vpn) { |
if (ctx == ASID_KERNEL) { |
if (va == 0) { |
/* NULL access in kernel */ |
do_fast_data_access_mmu_miss_fault(istate, tag, |
do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, |
__func__); |
} |
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected " |
do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, "Unexpected " |
"kernel page fault."); |
} |
275,9 → 264,9 |
* Insert it into DTLB. |
*/ |
t->a = true; |
dtlb_pte_copy(t, index, true); |
dtlb_pte_copy(t, true); |
#ifdef CONFIG_TSB |
dtsb_pte_copy(t, index, true); |
//dtsb_pte_copy(t, true); |
#endif |
page_table_unlock(AS, true); |
} else { |
287,31 → 276,28 |
*/ |
page_table_unlock(AS, true); |
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
do_fast_data_access_mmu_miss_fault(istate, tag, |
do_fast_data_access_mmu_miss_fault(istate, page_and_ctx, |
__func__); |
} |
} |
#endif |
//} |
} |
/** DTLB protection fault handler. |
* |
* @param tag Content of the TLB Tag Access register as it existed |
* when the trap happened. This is to prevent confusion |
* created by clobbered Tag Access register during a nested |
* DTLB miss. |
* @param page_and_ctx A 64-bit value describing the fault. The most |
* significant 51 bits of the value contain the virtual |
* address which caused the fault truncated to the page |
* boundary. The least significant 13 bits of the value |
* contain the number of the context in which the fault |
* occurred. |
* @param istate Interrupted state saved on the stack. |
*/ |
//void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate) |
//{ |
#if 0 |
uintptr_t va; |
index_t index; |
void fast_data_access_protection(uint64_t page_and_ctx, istate_t *istate) |
{ |
pte_t *t; |
uintptr_t va = DMISS_ADDRESS(page_and_ctx); |
uint16_t ctx = DMISS_CONTEXT(page_and_ctx); |
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE); |
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */ |
page_table_lock(AS, true); |
t = page_mapping_find(AS, va); |
if (t && PTE_WRITABLE(t)) { |
322,11 → 308,10 |
*/ |
t->a = true; |
t->d = true; |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, |
va + index * MMU_PAGE_SIZE); |
dtlb_pte_copy(t, index, false); |
mmu_demap_page(va, ctx, MMU_FLAG_DTLB); |
dtlb_pte_copy(t, false); |
#ifdef CONFIG_TSB |
dtsb_pte_copy(t, index, false); |
//dtsb_pte_copy(t, false); |
#endif |
page_table_unlock(AS, true); |
} else { |
336,12 → 321,11 |
*/ |
page_table_unlock(AS, true); |
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { |
do_fast_data_access_protection_fault(istate, tag, |
do_fast_data_access_protection_fault(istate, page_and_ctx, |
__func__); |
} |
} |
#endif |
//} |
} |
/** Print TLB entry (for debugging purposes). |
* |
363,12 → 347,9 |
} |
#endif |
#if defined (US) |
/** Print contents of both TLBs. */ |
void tlb_print(void) |
{ |
#if 0 |
{ |
int i; |
tlb_data_t d; |
tlb_tag_read_reg_t t; |
389,56 → 370,6 |
#endif |
} |
#elif defined (US3) |
/** Print contents of all TLBs. */ |
void tlb_print(void) |
{ |
#if 0 |
int i; |
tlb_data_t d; |
tlb_tag_read_reg_t t; |
printf("TLB_ISMALL contents:\n"); |
for (i = 0; i < tlb_ismall_size(); i++) { |
d.value = dtlb_data_access_read(TLB_ISMALL, i); |
t.value = dtlb_tag_read_read(TLB_ISMALL, i); |
print_tlb_entry(i, t, d); |
} |
printf("TLB_IBIG contents:\n"); |
for (i = 0; i < tlb_ibig_size(); i++) { |
d.value = dtlb_data_access_read(TLB_IBIG, i); |
t.value = dtlb_tag_read_read(TLB_IBIG, i); |
print_tlb_entry(i, t, d); |
} |
printf("TLB_DSMALL contents:\n"); |
for (i = 0; i < tlb_dsmall_size(); i++) { |
d.value = dtlb_data_access_read(TLB_DSMALL, i); |
t.value = dtlb_tag_read_read(TLB_DSMALL, i); |
print_tlb_entry(i, t, d); |
} |
printf("TLB_DBIG_1 contents:\n"); |
for (i = 0; i < tlb_dbig_size(); i++) { |
d.value = dtlb_data_access_read(TLB_DBIG_0, i); |
t.value = dtlb_tag_read_read(TLB_DBIG_0, i); |
print_tlb_entry(i, t, d); |
} |
printf("TLB_DBIG_2 contents:\n"); |
for (i = 0; i < tlb_dbig_size(); i++) { |
d.value = dtlb_data_access_read(TLB_DBIG_1, i); |
t.value = dtlb_tag_read_read(TLB_DBIG_1, i); |
print_tlb_entry(i, t, d); |
} |
#endif |
} |
#endif |
#if 0 |
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, |
const char *str) |
{ |
446,81 → 377,35 |
dump_istate(istate); |
panic("%s\n", str); |
} |
#endif |
#if 0 |
void do_fast_data_access_mmu_miss_fault(istate_t *istate, |
tlb_tag_access_reg_t tag, const char *str) |
uint64_t page_and_ctx, const char *str) |
{ |
uintptr_t va; |
va = tag.vpn << MMU_PAGE_WIDTH; |
if (tag.context) { |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, |
tag.context); |
if (DMISS_CONTEXT(page_and_ctx)) { |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx), |
DMISS_CONTEXT(page_and_ctx)); |
} |
dump_istate(istate); |
printf("Faulting page: %p, ASID=%d\n", va, tag.context); |
printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx)); |
panic("%s\n", str); |
} |
#endif |
#if 0 |
void do_fast_data_access_protection_fault(istate_t *istate, |
tlb_tag_access_reg_t tag, const char *str) |
uint64_t page_and_ctx, const char *str) |
{ |
uintptr_t va; |
va = tag.vpn << MMU_PAGE_WIDTH; |
if (tag.context) { |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, |
tag.context); |
if (DMISS_CONTEXT(page_and_ctx)) { |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, DMISS_ADDRESS(page_and_ctx), |
DMISS_CONTEXT(page_and_ctx)); |
} |
printf("Faulting page: %p, ASID=%d\n", va, tag.context); |
printf("Faulting page: %p, ASID=%d\n", DMISS_ADDRESS(page_and_ctx), DMISS_CONTEXT(page_and_ctx)); |
dump_istate(istate); |
panic("%s\n", str); |
} |
#endif |
void describe_mmu_fault(void) |
{ |
} |
#if defined (US3) |
/** Invalidates given TLB entry if and only if it is non-locked or global. |
* |
* @param tlb TLB number (one of TLB_DSMALL, TLB_DBIG_0, TLB_DBIG_1, |
* TLB_ISMALL, TLB_IBIG). |
* @param entry Entry index within the given TLB. |
*/ |
#if 0 |
static void tlb_invalidate_entry(int tlb, index_t entry) |
{ |
tlb_data_t d; |
tlb_tag_read_reg_t t; |
if (tlb == TLB_DSMALL || tlb == TLB_DBIG_0 || tlb == TLB_DBIG_1) { |
d.value = dtlb_data_access_read(tlb, entry); |
if (!d.l || d.g) { |
t.value = dtlb_tag_read_read(tlb, entry); |
d.v = false; |
dtlb_tag_access_write(t.value); |
dtlb_data_access_write(tlb, entry, d.value); |
} |
} else if (tlb == TLB_ISMALL || tlb == TLB_IBIG) { |
d.value = itlb_data_access_read(tlb, entry); |
if (!d.l || d.g) { |
t.value = itlb_tag_read_read(tlb, entry); |
d.v = false; |
itlb_tag_access_write(t.value); |
itlb_data_access_write(tlb, entry, d.value); |
} |
} |
} |
#endif |
#endif |
/** Invalidate all unlocked ITLB and DTLB entries. */ |
void tlb_invalidate_all(void) |
{ |
/branches/sparc/kernel/arch/sparc64/src/sun4u/start.S |
---|
283,7 → 283,6 |
or %sp, %lo(temporary_boot_stack), %sp |
sub %sp, STACK_BIAS, %sp |
sethi 0x42142, %g0 |
sethi %hi(bootinfo), %o0 |
call memcpy ! copy bootinfo |
or %o0, %lo(bootinfo), %o0 |