/trunk/kernel/arch/sparc64/include/boot/boot.h |
---|
39,6 → 39,7 |
#define VMA 0x400000 |
#define LMA VMA |
#ifndef __ASM__ |
#ifndef __LINKER__ |
#include <arch/types.h> |
91,6 → 92,7 |
extern bootinfo_t bootinfo; |
#endif |
#endif |
#endif |
/trunk/kernel/arch/sparc64/include/arch.h |
---|
35,10 → 35,6 |
#ifndef __sparc64_ARCH_H__ |
#define __sparc64_ARCH_H__ |
#include <arch/types.h> |
extern void take_over_tlb_and_tt(uintptr_t base); |
#endif |
/** @} |
/trunk/kernel/arch/sparc64/include/trap/trap.h |
---|
35,16 → 35,6 |
#ifndef __sparc64_TRAP_H__ |
#define __sparc64_TRAP_H__ |
#include <arch/trap/trap_table.h> |
#include <arch/asm.h> |
/** Switch to the in-kernel trap table.
 *
 * Writes the address of the kernel's trap_table into the TBA
 * (Trap Base Address) register via tba_write(), so subsequent
 * traps vector into kernel-provided handlers.
 */ |
static inline void trap_switch_trap_table(void) |
{ |
/* Point TBA to kernel trap table. */ |
tba_write((uint64_t) trap_table); |
} |
extern void trap_init(void); |
#endif |
/trunk/kernel/arch/sparc64/include/mm/tte.h |
---|
35,6 → 35,19 |
#ifndef __sparc64_TTE_H__ |
#define __sparc64_TTE_H__ |
#define TTE_G (1<<0) |
#define TTE_W (1<<1) |
#define TTE_P (1<<2) |
#define TTE_E (1<<3) |
#define TTE_CV (1<<4) |
#define TTE_CP (1<<5) |
#define TTE_L (1<<6) |
#define TTE_V_SHIFT 63 |
#define TTE_SIZE_SHIFT 61 |
#ifndef __ASM__ |
#include <arch/types.h> |
/** Translation Table Entry - Tag. */ |
75,6 → 88,8 |
typedef union tte_data tte_data_t; |
#endif /* !def __ASM__ */ |
#endif |
/** @} |
/trunk/kernel/arch/sparc64/include/mm/mmu.h |
---|
103,7 → 103,7 |
}; |
typedef union lsu_cr_reg lsu_cr_reg_t; |
#endif /* !__ASM__ */ |
#endif /* !def __ASM__ */ |
#endif |
/trunk/kernel/arch/sparc64/include/mm/tlb.h |
---|
35,17 → 35,13 |
#ifndef __sparc64_TLB_H__ |
#define __sparc64_TLB_H__ |
#include <arch/mm/tte.h> |
#include <arch/mm/mmu.h> |
#include <arch/mm/page.h> |
#include <arch/asm.h> |
#include <arch/barrier.h> |
#include <arch/types.h> |
#include <typedefs.h> |
#define ITLB_ENTRY_COUNT 64 |
#define DTLB_ENTRY_COUNT 64 |
#define MEM_CONTEXT_KERNEL 0 |
#define MEM_CONTEXT_TEMP 1 |
/** Page sizes. */ |
#define PAGESIZE_8K 0 |
#define PAGESIZE_64K 1 |
55,6 → 51,33 |
/** Bit width of the TLB-locked portion of kernel address space. */ |
#define KERNEL_PAGE_WIDTH 22 /* 4M */ |
/* TLB Demap Operation types. */ |
#define TLB_DEMAP_PAGE 0 |
#define TLB_DEMAP_CONTEXT 1 |
#define TLB_DEMAP_TYPE_SHIFT 6 |
/* TLB Demap Operation Context register encodings. */ |
#define TLB_DEMAP_PRIMARY 0 |
#define TLB_DEMAP_SECONDARY 1 |
#define TLB_DEMAP_NUCLEUS 2 |
#define TLB_DEMAP_CONTEXT_SHIFT 4 |
/* TLB Tag Access shifts */ |
#define TLB_TAG_ACCESS_CONTEXT_SHIFT 0 |
#define TLB_TAG_ACCESS_VPN_SHIFT 13 |
#ifndef __ASM__ |
#include <arch/mm/tte.h> |
#include <arch/mm/mmu.h> |
#include <arch/mm/page.h> |
#include <arch/asm.h> |
#include <arch/barrier.h> |
#include <arch/types.h> |
#include <typedefs.h> |
union tlb_context_reg { |
uint64_t v; |
struct { |
90,15 → 113,7 |
typedef union tlb_tag_read_reg tlb_tag_read_reg_t; |
typedef union tlb_tag_read_reg tlb_tag_access_reg_t; |
/** TLB Demap Operation types. */ |
#define TLB_DEMAP_PAGE 0 |
#define TLB_DEMAP_CONTEXT 1 |
/** TLB Demap Operation Context register encodings. */ |
#define TLB_DEMAP_PRIMARY 0 |
#define TLB_DEMAP_SECONDARY 1 |
#define TLB_DEMAP_NUCLEUS 2 |
/** TLB Demap Operation Address. */ |
union tlb_demap_addr { |
uint64_t value; |
384,7 → 399,7 |
da.context = context_encoding; |
da.vpn = pg.vpn; |
asi_u64_write(ASI_IMMU_DEMAP, da.value, 0); |
asi_u64_write(ASI_IMMU_DEMAP, da.value, 0); /* da.value is the address within the ASI */ |
flush(); |
} |
406,7 → 421,7 |
da.context = context_encoding; |
da.vpn = pg.vpn; |
asi_u64_write(ASI_DMMU_DEMAP, da.value, 0); |
asi_u64_write(ASI_DMMU_DEMAP, da.value, 0); /* da.value is the address within the ASI */ |
membar(); |
} |
416,6 → 431,8 |
extern void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable); |
#endif /* !def __ASM__ */ |
#endif |
/** @} |
/trunk/kernel/arch/sparc64/src/sparc64.c |
---|
91,87 → 91,5 |
{ |
} |
/** Take over TLB and trap table. |
* |
* Initialize ITLB and DTLB and switch to kernel |
* trap table. |
* |
* First, demap context 0 and install the |
* global 4M locked kernel mapping. |
* |
* Second, prepare a temporary IMMU mapping in |
* context 1, switch to it, demap context 0, |
* install the global 4M locked kernel mapping |
* in context 0 and switch back to context 0. |
* |
* @param base Base address that will be hardwired in both TLBs. |
*/ |
void take_over_tlb_and_tt(uintptr_t base) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
frame_address_t fr; |
page_address_t pg; |
/* |
* Switch to the kernel trap table. |
*/ |
trap_switch_trap_table(); |
fr.address = base; |
pg.address = base; |
/* |
* We do identity mapping of the 4M page at 'base' |
* (page address == frame address). |
*/ |
tag.value = 0; |
tag.context = 0; |
tag.vpn = pg.vpn; |
data.value = 0; |
data.v = true; |
data.size = PAGESIZE_4M; |
data.pfn = fr.pfn; |
data.l = true; |
data.cp = 1; |
data.cv = 0; |
data.p = true; |
data.w = true; |
data.g = true; |
/* NOTE(review): l marks the entry locked (TTE_L) so it stays |
* resident; cp/cv control cacheability per the UltraSPARC TTE |
* format -- confirm against the CPU manual. |
*/ |
/* |
* Straightforwardly demap DMMU context 0, |
* and replace it with the locked kernel mapping. |
*/ |
dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_NUCLEUS, 0); |
dtlb_tag_access_write(tag.value); |
dtlb_data_in_write(data.value); |
/* |
* Install kernel code mapping in context 1 |
* and switch to it. The temporary entry is made |
* non-global (g = false) so that it applies to |
* context 1 only. |
*/ |
tag.context = 1; |
data.g = false; |
itlb_tag_access_write(tag.value); |
itlb_data_in_write(data.value); |
mmu_primary_context_write(1); |
/* |
* Demap old context 0. |
*/ |
itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_NUCLEUS, 0); |
/* |
* Install the locked kernel mapping in context 0 |
* and switch to it. |
*/ |
tag.context = 0; |
data.g = true; |
itlb_tag_access_write(tag.value); |
itlb_data_in_write(data.value); |
mmu_primary_context_write(0); |
} |
/** @} |
*/ |
/trunk/kernel/arch/sparc64/src/start.S |
---|
27,7 → 27,12 |
# |
#include <arch/regdef.h> |
#include <arch/boot/boot.h> |
#include <arch/mm/mmu.h> |
#include <arch/mm/tlb.h> |
#include <arch/mm/tte.h> |
.register %g2, #scratch |
.register %g3, #scratch |
.register %g6, #scratch |
52,15 → 57,21 |
.global kernel_image_start |
kernel_image_start: |
flushw ! flush all but the active register window |
/* |
* Disable interrupts and disable 32-bit address masking. |
* Setup basic runtime environment. |
*/ |
rdpr %pstate, %l0 |
and %l0, ~(PSTATE_AM_BIT|PSTATE_IE_BIT), %l0 |
wrpr %l0, 0, %pstate |
flushw ! flush all but the active register window |
wrpr %g0, 0, %tl ! TL = 0, primary context register is used |
! Disable interrupts and disable 32-bit address masking. |
rdpr %pstate, %g1 |
and %g1, ~(PSTATE_AM_BIT|PSTATE_IE_BIT), %g1 |
wrpr %g1, 0, %pstate |
wrpr %r0, 0, %pil ! initialize %pil |
/* |
* Copy the bootinfo structure passed from the boot loader |
* to the kernel bootinfo structure. |
71,19 → 82,135 |
call memcpy |
nop |
set kernel_image_start, %o0 |
/* |
* Take over control of MMU. |
* Switch to kernel trap table. |
*/ |
set trap_table, %g1 |
wrpr %g1, 0, %tba |
/* |
* Take over the DMMU by installing global locked |
* TTE entry identically mapping the first 4M |
* of memory. |
* |
* First, take over DMMU for which we don't need to issue |
* any FLUSH instructions. Because of that, we can |
* demap the old DTLB pretty straightforwardly. |
* In case of DMMU, no FLUSH instructions need to be |
* issued. Because of that, the old DTLB contents can |
* be demapped pretty straightforwardly and without |
* causing any traps. |
*/ |
call take_over_tlb_and_tt |
nop |
wrpr %r0, 0, %pil |
wr %g0, ASI_DMMU, %asi |
#define SET_TLB_DEMAP_CMD(r1, context_id) \ |
set (TLB_DEMAP_CONTEXT<<TLB_DEMAP_TYPE_SHIFT) | (context_id<<TLB_DEMAP_CONTEXT_SHIFT), %r1 |
! demap context 0 |
SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS) |
stxa %g0, [%g1] ASI_DMMU_DEMAP |
membar #Sync |
#define SET_TLB_TAG(r1, context) \ |
set VMA | (context<<TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1 |
! write DTLB tag |
SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL) |
stxa %g1, [VA_DMMU_TAG_ACCESS] %asi |
membar #Sync |
#define SET_TLB_DATA(r1, r2, imm) \ |
set TTE_L | TTE_CP | TTE_P | TTE_W | LMA | imm, %r1; \ |
set PAGESIZE_4M, %r2; \ |
sllx %r2, TTE_SIZE_SHIFT, %r2; \ |
or %r1, %r2, %r1; \ |
set 1, %r2; \ |
sllx %r2, TTE_V_SHIFT, %r2; \ |
or %r1, %r2, %r1; |
! write DTLB data and install the kernel mapping |
SET_TLB_DATA(g1, g2, TTE_G) |
stxa %g1, [%g0] ASI_DTLB_DATA_IN_REG |
membar #Sync |
/* |
* Now is time to take over the IMMU. |
* Unfortunately, it cannot be done as easily as the DMMU, |
* because the IMMU is mapping the code it executes. |
* |
* [ Note that brave experiments with disabling the IMMU |
* and using the DMMU approach failed after a dozen |
* desperate days with little success. ] |
* |
* The approach used here is inspired by OpenBSD. |
* First, the kernel creates IMMU mapping for itself |
* in context 1 (MEM_CONTEXT_TEMP) and switches to |
* it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped |
* afterwards and replaced with the kernel permanent |
* mapping. Finally, the kernel switches back to |
* context 0 and demaps context 1. |
* |
* Moreover, the IMMU requires use of the FLUSH instructions. |
* But that is OK because we always use operands with |
* addresses already mapped by the taken over DTLB. |
*/ |
set kernel_image_start, %g7 |
! write ITLB tag of context 1 |
SET_TLB_TAG(g1, MEM_CONTEXT_TEMP) |
set VA_DMMU_TAG_ACCESS, %g2 |
stxa %g1, [%g2] ASI_IMMU |
flush %g7 |
! write ITLB data and install the temporary mapping in context 1 |
SET_TLB_DATA(g1, g2, 0) ! use non-global mapping |
stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG |
flush %g7 |
! switch to context 1 |
set MEM_CONTEXT_TEMP, %g1 |
stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi ! ASI_DMMU is correct here !!! |
flush %g7 |
! demap context 0 |
SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS) |
stxa %g0, [%g1] ASI_IMMU_DEMAP |
flush %g7 |
! write ITLB tag of context 0 |
SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL) |
set VA_DMMU_TAG_ACCESS, %g2 |
stxa %g1, [%g2] ASI_IMMU |
flush %g7 |
! write ITLB data and install the permanent kernel mapping in context 0 |
SET_TLB_DATA(g1, g2, 0) ! use non-global mapping |
stxa %g1, [%g0] ASI_ITLB_DATA_IN_REG |
flush %g7 |
! switch to context 0 |
stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi ! ASI_DMMU is correct here !!! |
flush %g7 |
! ensure nucleus mapping |
wrpr %g0, 1, %tl |
! set context 1 in the primary context register |
set MEM_CONTEXT_TEMP, %g1 |
stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi ! ASI_DMMU is correct here !!! |
flush %g7 |
! demap context 1 |
SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_PRIMARY) |
stxa %g0, [%g1] ASI_IMMU_DEMAP |
flush %g7 |
! set context 0 in the primary context register |
stxa %g0, [VA_PRIMARY_CONTEXT_REG] %asi ! ASI_DMMU is correct here !!! |
flush %g7 |
! set TL back to 0 |
wrpr %g0, 0, %tl |
call main_bsp |
nop |