/trunk/kernel/arch/sparc64/include/arch.h |
---|
37,11 → 37,13 |
#ifndef KERN_sparc64_ARCH_H_ |
#define KERN_sparc64_ARCH_H_ |
#define ASI_AIUP 0x10 /** Access to primary context with user privileges. */ |
#define ASI_AIUS 0x11 /** Access to secondary context with user privileges. */ |
#define ASI_AIUP 0x10 /** Access to primary context with user privileges. */ |
#define ASI_AIUS 0x11 /** Access to secondary context with user privileges. */ |
#define NWINDOW 8 /** Number of register window sets. */ |
#define ASI_NUCLEUS_QUAD_LDD 0x24 /** ASI for 16-byte atomic loads. */ |
#define NWINDOW 8 /** Number of register window sets. */ |
#endif |
/** @} |
/trunk/kernel/arch/sparc64/include/trap/mmu.h |
---|
44,6 → 44,10 |
#include <arch/mm/tte.h> |
#include <arch/trap/regwin.h> |
#ifdef CONFIG_TSB |
#include <arch/mm/tsb.h> |
#endif |
#define TT_FAST_INSTRUCTION_ACCESS_MMU_MISS 0x64 |
#define TT_FAST_DATA_ACCESS_MMU_MISS 0x68 |
#define TT_FAST_DATA_ACCESS_PROTECTION 0x6c |
56,8 → 60,19 |
/* |
* First, try to refill TLB from TSB. |
*/ |
! TODO |
#ifdef CONFIG_TSB |
ldxa [%g0] ASI_IMMU, %g1 ! read TSB Tag Target Register |
ldxa [%g0] ASI_IMMU_TSB_8KB_PTR_REG, %g2 ! read TSB 8K Pointer |
ldda [%g2] ASI_NUCLEUS_QUAD_LDD, %g4 ! 16-byte atomic load into %g4 and %g5 |
cmp %g1, %g4 ! is this the entry we are looking for? |
bne,pn %xcc, 0f |
nop |
stxa %g5, [%g0] ASI_ITLB_DATA_IN_REG ! copy mapping from ITSB to ITLB |
retry |
#endif |
0: |
wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate |
PREEMPTIBLE_HANDLER fast_instruction_access_mmu_miss |
.endm |
66,8 → 81,20 |
/* |
* First, try to refill TLB from TSB. |
*/ |
! TODO |
#ifdef CONFIG_TSB |
ldxa [%g0] ASI_DMMU, %g1 ! read TSB Tag Target Register |
srlx %g1, TSB_TAG_TARGET_CONTEXT_SHIFT, %g2 ! is this kernel miss? |
brz,pn %g2, 0f |
ldxa [%g0] ASI_DMMU_TSB_8KB_PTR_REG, %g3 ! read TSB 8K Pointer |
ldda [%g3] ASI_NUCLEUS_QUAD_LDD, %g4 ! 16-byte atomic load into %g4 and %g5 |
cmp %g1, %g4 ! is this the entry we are looking for? |
bne,pn %xcc, 0f |
nop |
stxa %g5, [%g0] ASI_DTLB_DATA_IN_REG ! copy mapping from DTSB to DTLB |
retry |
#endif |
/* |
* Second, test if it is the portion of the kernel address space |
* which is faulting. If that is the case, immediately create |
76,7 → 103,7 |
* |
* Note that branch-delay slots are used in order to save space. |
*/ |
0: |
mov VA_DMMU_TAG_ACCESS, %g1 |
ldxa [%g1] ASI_DMMU, %g1 ! read the faulting Context and VPN |
set TLB_TAG_ACCESS_CONTEXT_MASK, %g2 |
110,13 → 137,9 |
.macro FAST_DATA_ACCESS_PROTECTION_HANDLER tl |
/* |
* First, try to refill TLB from TSB. |
* The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER. |
*/ |
! TODO |
/* |
* The same special case as in FAST_DATA_ACCESS_MMU_MISS_HANDLER. |
*/ |
.if (\tl > 0) |
wrpr %g0, 1, %tl |
.endif |
/trunk/kernel/arch/sparc64/include/mm/tte.h |
---|
50,6 → 50,8 |
#include <arch/types.h> |
#define VA_TAG_PAGE_SHIFT 22 |
/** Translation Table Entry - Tag. */ |
union tte_tag { |
uint64_t value; |
/trunk/kernel/arch/sparc64/include/mm/mmu.h |
---|
48,7 → 48,7 |
#define ASI_IMMU_DEMAP 0x57 |
/* Virtual Addresses within ASI_IMMU. */ |
#define VA_IMMU_TAG_TARGET 0x0 /**< IMMU tag target register. */ |
#define VA_IMMU_TSB_TAG_TARGET 0x0 /**< IMMU TSB tag target register. */ |
#define VA_IMMU_SFSR 0x18 /**< IMMU sync fault status register. */ |
#define VA_IMMU_TSB_BASE 0x28 /**< IMMU TSB base register. */ |
#define VA_IMMU_TAG_ACCESS 0x30 /**< IMMU TLB tag access register. */ |
64,7 → 64,7 |
#define ASI_DMMU_DEMAP 0x5f |
/* Virtual Addresses within ASI_DMMU. */ |
#define VA_DMMU_TAG_TARGET 0x0 /**< DMMU tag target register. */ |
#define VA_DMMU_TSB_TAG_TARGET 0x0 /**< DMMU TSB tag target register. */ |
#define VA_PRIMARY_CONTEXT_REG 0x8 /**< DMMU primary context register. */ |
#define VA_SECONDARY_CONTEXT_REG 0x10 /**< DMMU secondary context register. */ |
#define VA_DMMU_SFSR 0x18 /**< DMMU sync fault status register. */ |
/trunk/kernel/arch/sparc64/include/mm/tsb.h |
---|
35,11 → 35,6 |
#ifndef KERN_sparc64_TSB_H_ |
#define KERN_sparc64_TSB_H_ |
#include <arch/mm/tte.h> |
#include <arch/mm/mmu.h> |
#include <arch/types.h> |
#include <typedefs.h> |
/* |
 * ITSB and DTSB will claim 64K of memory, which |
 * is a nice number considering that it is one of |
51,8 → 46,31 |
#define ITSB_ENTRY_COUNT (512*(1<<TSB_SIZE)) |
#define DTSB_ENTRY_COUNT (512*(1<<TSB_SIZE)) |
#define TSB_TAG_TARGET_CONTEXT_SHIFT 48 |
#ifndef __ASM__ |
#include <arch/mm/tte.h> |
#include <arch/mm/mmu.h> |
#include <arch/types.h> |
#include <typedefs.h> |
/** TSB Tag Target register. */ |
union tsb_tag_target { |
uint64_t value; |
struct { |
unsigned invalid : 1; /**< Invalidated by software. */ |
unsigned : 2; |
unsigned context : 13; /**< Software ASID. */ |
unsigned : 6; |
uint64_t va_tag : 42; /**< Virtual address bits <63:22>. */ |
} __attribute__ ((packed)); |
}; |
typedef union tsb_tag_target tsb_tag_target_t; |
/** TSB entry. */ |
struct tsb_entry { |
tte_tag_t tag; |
tsb_tag_target_t tag; |
tte_data_t data; |
} __attribute__ ((packed)); |
typedef struct tsb_entry tsb_entry_t; |
109,7 → 127,11 |
} |
extern void tsb_invalidate(as_t *as, uintptr_t page, count_t pages); |
extern void itsb_pte_copy(pte_t *t); |
extern void dtsb_pte_copy(pte_t *t, bool ro); |
#endif /* !def __ASM__ */ |
#endif |
/** @} |
/trunk/kernel/arch/sparc64/include/barrier.h |
---|
41,9 → 41,9 |
#define CS_ENTER_BARRIER() __asm__ volatile ("" ::: "memory") |
#define CS_LEAVE_BARRIER() __asm__ volatile ("" ::: "memory") |
#define memory_barrier() |
#define read_barrier() |
#define write_barrier() |
#define memory_barrier() __asm__ volatile ("membar #LoadLoad | #StoreStore\n" ::: "memory") |
#define read_barrier() __asm__ volatile ("membar #LoadLoad\n" ::: "memory") |
#define write_barrier() __asm__ volatile ("membar #StoreStore\n" ::: "memory") |
/** Flush Instruction Memory instruction. */ |
static inline void flush(void) |