/trunk/kernel/arch/sparc64/include/mm/cache.h |
---|
43,10 → 43,44 |
#define dcache_flush_frame(p, f) \ |
dcache_flush_tag(PAGE_COLOR((p)), ADDR2PFN((f))); |
/** |
* Enumerations to differentiate among different scopes of D-Cache |
* invalidation. |
*/ |
typedef enum {
	/** Sentinel/unused value; denotes no pending invalidation. */
	DCACHE_INVL_INVALID,
	/** Invalidate the entire D-cache. */
	DCACHE_INVL_ALL,
	/** Invalidate all D-cache lines of one virtual color. */
	DCACHE_INVL_COLOR,
	/** Invalidate D-cache lines of one virtual color matching one frame. */
	DCACHE_INVL_FRAME
} dcache_invalidate_type_t;
/** |
* Number of messages that can be queued in the cpu_arch_t structure at a time. |
*/ |
#define DCACHE_MSG_QUEUE_LEN 10 |
/** D-cache shootdown message type.
 *
 * One queued invalidation request delivered to a CPU via the D-cache
 * shootdown IPI. Queued in cpu_arch_t (up to DCACHE_MSG_QUEUE_LEN entries).
 */
typedef struct {
	dcache_invalidate_type_t type;	/**< Scope of the requested invalidation. */
	int color;	/**< Virtual color; used by DCACHE_INVL_COLOR and DCACHE_INVL_FRAME. */
	uintptr_t frame;	/**< Frame to invalidate; used by DCACHE_INVL_FRAME only. */
} dcache_shootdown_msg_t;
extern void dcache_flush(void); |
extern void dcache_flush_color(int c); |
extern void dcache_flush_tag(int c, pfn_t tag); |
#ifdef CONFIG_SMP |
extern void dcache_shootdown_start(dcache_invalidate_type_t type, int color, |
uintptr_t frame); |
extern void dcache_shootdown_finalize(void); |
extern void dcache_shootdown_ipi_recv(void); |
#else |
#define dcache_shootdown_start(t, c, f) |
#define dcache_shootdown_finalize() |
#define dcache_shootdown_ipi_recv() |
#endif /* CONFIG_SMP */ |
#endif |
/** @} |
/trunk/kernel/arch/sparc64/include/mm/tsb.h |
---|
112,8 → 112,8 |
struct pte; |
extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages); |
extern void itsb_pte_copy(struct pte *t, index_t index); |
extern void dtsb_pte_copy(struct pte *t, index_t index, bool ro); |
extern void itsb_pte_copy(struct pte *t); |
extern void dtsb_pte_copy(struct pte *t, bool ro); |
#endif /* !def __ASM__ */ |
/trunk/kernel/arch/sparc64/include/mm/page.h |
---|
37,27 → 37,11 |
#include <arch/mm/frame.h> |
/* |
* On the TLB and TSB level, we still use 8K pages, which are supported by the |
* MMU. |
*/ |
#define MMU_PAGE_WIDTH MMU_FRAME_WIDTH |
#define MMU_PAGE_SIZE MMU_FRAME_SIZE |
/* |
* On the page table level, we use 16K pages. 16K pages are not supported by |
* the MMU but we emulate them with pairs of 8K pages. |
*/ |
#define PAGE_WIDTH FRAME_WIDTH |
#define PAGE_SIZE FRAME_SIZE |
#define MMU_PAGES_PER_PAGE (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH)) |
#define PAGE_COLOR_BITS 1 /**< 14 - 13; 2^14 == 16K == alias boundary. */ |
/* |
* With 16K pages, there is only one page color. |
*/ |
#define PAGE_COLOR_BITS 0 /**< 14 - 14; 2^14 == 16K == alias boundary. */ |
#ifdef KERNEL |
#ifndef __ASM__ |
/trunk/kernel/arch/sparc64/include/mm/as.h |
---|
81,11 → 81,10 |
#include <genarch/mm/as_ht.h> |
#ifdef CONFIG_TSB |
#include <arch/mm/tsb.h> |
#define as_invalidate_translation_cache(as, page, cnt) \ |
tsb_invalidate((as), (page), (cnt)) |
# include <arch/mm/tsb.h> |
# define as_invalidate_translation_cache(as, page, cnt) tsb_invalidate(as, page, cnt) |
#else |
#define as_invalidate_translation_cache(as, page, cnt) |
# define as_invalidate_translation_cache(as, page, cnt) |
#endif |
extern void as_arch_init(void); |
/trunk/kernel/arch/sparc64/include/mm/frame.h |
---|
35,20 → 35,7 |
#ifndef KERN_sparc64_FRAME_H_ |
#define KERN_sparc64_FRAME_H_ |
/* |
* Page size supported by the MMU. |
* For 8K there is the nasty illegal virtual aliasing problem. |
* Therefore, the kernel uses 8K only internally on the TLB and TSB levels. |
*/ |
#define MMU_FRAME_WIDTH 13 /* 8K */ |
#define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH) |
/* |
* Page size exported to the generic memory management subsystems. |
* This page size is not directly supported by the MMU, but we can emulate |
* each 16K page with a pair of adjacent 8K pages. |
*/ |
#define FRAME_WIDTH 14 /* 16K */ |
#define FRAME_WIDTH 13 /* 8K */ |
#define FRAME_SIZE (1 << FRAME_WIDTH) |
#ifdef KERNEL |
/trunk/kernel/arch/sparc64/include/cpu.h |
---|
64,6 → 64,11 |
uint64_t next_tick_cmpr; /**< Next clock interrupt should be |
generated when the TICK register |
matches this value. */ |
#ifdef CONFIG_SMP |
int dcache_active; |
dcache_shootdown_msg_t dcache_messages[DCACHE_MSG_QUEUE_LEN]; |
count_t dcache_message_count; |
#endif |
} cpu_arch_t; |
#endif |
/trunk/kernel/arch/sparc64/include/stack.h |
---|
43,7 → 43,7 |
/** |
* 16-extended-word save area for %i[0-7] and %l[0-7] registers. |
*/ |
#define STACK_WINDOW_SAVE_AREA_SIZE (16 * STACK_ITEM_SIZE) |
#define STACK_WINDOW_SAVE_AREA_SIZE (16*STACK_ITEM_SIZE) |
/** |
* By convention, the actual top of the stack is %sp + STACK_BIAS. |
/trunk/kernel/arch/sparc64/src/smp/ipi.c |
---|
39,6 → 39,7 |
#include <arch/asm.h> |
#include <config.h> |
#include <mm/tlb.h> |
#include <arch/mm/cache.h> |
#include <arch/interrupt.h> |
#include <arch/trap/interrupt.h> |
#include <arch/barrier.h> |
124,6 → 125,11 |
case IPI_TLB_SHOOTDOWN: |
func = tlb_shootdown_ipi_recv; |
break; |
#if (defined(CONFIG_SMP) && (defined(CONFIG_VIRT_IDX_DCACHE))) |
case IPI_DCACHE_SHOOTDOWN: |
func = dcache_shootdown_ipi_recv; |
break; |
#endif |
default: |
panic("Unknown IPI (%d).\n", ipi); |
break; |
/trunk/kernel/arch/sparc64/src/trap/interrupt.c |
---|
44,6 → 44,7 |
#include <print.h> |
#include <arch.h> |
#include <mm/tlb.h> |
#include <arch/mm/cache.h> |
#include <config.h> |
#include <synch/spinlock.h> |
90,6 → 91,10 |
#ifdef CONFIG_SMP |
if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) { |
tlb_shootdown_ipi_recv(); |
#ifdef CONFIG_VIRT_IDX_DCACHE |
} else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) { |
dcache_shootdown_ipi_recv(); |
#endif |
} |
#endif |
} else { |
/trunk/kernel/arch/sparc64/src/cpu/cpu.c |
---|
51,6 → 51,11 |
upa_config.value = upa_config_read(); |
CPU->arch.mid = upa_config.mid; |
#if (defined(CONFIG_SMP) && defined(CONFIG_VIRT_IDX_DCACHE)) |
CPU->arch.dcache_active = 1; |
CPU->arch.dcache_message_count = 0; |
#endif |
/* |
* Detect processor frequency. |
*/ |
/trunk/kernel/arch/sparc64/src/mm/cache.S |
---|
File deleted |
/trunk/kernel/arch/sparc64/src/mm/tlb.c |
---|
54,14 → 54,14 |
#include <arch/mm/tsb.h> |
#endif |
static void dtlb_pte_copy(pte_t *t, index_t index, bool ro); |
static void itlb_pte_copy(pte_t *t, index_t index); |
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, |
const char *str); |
static void dtlb_pte_copy(pte_t *t, bool ro); |
static void itlb_pte_copy(pte_t *t); |
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const |
char *str); |
static void do_fast_data_access_mmu_miss_fault(istate_t *istate, |
tlb_tag_access_reg_t tag, const char *str); |
tlb_tag_access_reg_t tag, const char *str); |
static void do_fast_data_access_protection_fault(istate_t *istate, |
tlb_tag_access_reg_t tag, const char *str); |
tlb_tag_access_reg_t tag, const char *str); |
char *context_encoding[] = { |
"Primary", |
92,8 → 92,8 |
* @param locked True for permanent mappings, false otherwise. |
* @param cacheable True if the mapping is cacheable, false otherwise. |
*/ |
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, |
bool locked, bool cacheable) |
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool |
locked, bool cacheable) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
126,12 → 126,11 |
/** Copy PTE to TLB. |
* |
* @param t Page Table Entry to be copied. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
* @param ro If true, the entry will be created read-only, regardless of its |
* w field. |
* @param t Page Table Entry to be copied. |
* @param ro If true, the entry will be created read-only, regardless of its w |
* field. |
*/ |
void dtlb_pte_copy(pte_t *t, index_t index, bool ro) |
void dtlb_pte_copy(pte_t *t, bool ro) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
138,15 → 137,15 |
page_address_t pg; |
frame_address_t fr; |
pg.address = t->page + (index << MMU_PAGE_WIDTH); |
fr.address = t->frame + (index << MMU_PAGE_WIDTH); |
pg.address = t->page; |
fr.address = t->frame; |
tag.value = 0; |
tag.context = t->as->asid; |
tag.vpn = pg.vpn; |
dtlb_tag_access_write(tag.value); |
data.value = 0; |
data.v = true; |
data.size = PAGESIZE_8K; |
159,16 → 158,15 |
data.p = t->k; /* p like privileged */ |
data.w = ro ? false : t->w; |
data.g = t->g; |
dtlb_data_in_write(data.value); |
} |
/** Copy PTE to ITLB. |
* |
* @param t Page Table Entry to be copied. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
* @param t Page Table Entry to be copied. |
*/ |
void itlb_pte_copy(pte_t *t, index_t index) |
void itlb_pte_copy(pte_t *t) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
175,8 → 173,8 |
page_address_t pg; |
frame_address_t fr; |
pg.address = t->page + (index << MMU_PAGE_WIDTH); |
fr.address = t->frame + (index << MMU_PAGE_WIDTH); |
pg.address = t->page; |
fr.address = t->frame; |
tag.value = 0; |
tag.context = t->as->asid; |
201,7 → 199,6 |
void fast_instruction_access_mmu_miss(int n, istate_t *istate) |
{ |
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE); |
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE; |
pte_t *t; |
page_table_lock(AS, true); |
212,9 → 209,9 |
* Insert it into ITLB. |
*/ |
t->a = true; |
itlb_pte_copy(t, index); |
itlb_pte_copy(t); |
#ifdef CONFIG_TSB |
itsb_pte_copy(t, index); |
itsb_pte_copy(t); |
#endif |
page_table_unlock(AS, true); |
} else { |
225,7 → 222,7 |
page_table_unlock(AS, true); |
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) { |
do_fast_instruction_access_mmu_miss_fault(istate, |
__FUNCTION__); |
__FUNCTION__); |
} |
} |
} |
239,21 → 236,19 |
{ |
tlb_tag_access_reg_t tag; |
uintptr_t va; |
index_t index; |
pte_t *t; |
tag.value = dtlb_tag_access_read(); |
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE); |
index = tag.vpn % MMU_PAGES_PER_PAGE; |
va = tag.vpn << PAGE_WIDTH; |
if (tag.context == ASID_KERNEL) { |
if (!tag.vpn) { |
/* NULL access in kernel */ |
do_fast_data_access_mmu_miss_fault(istate, tag, |
__FUNCTION__); |
__FUNCTION__); |
} |
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected " |
"kernel page fault."); |
"kernel page fault."); |
} |
page_table_lock(AS, true); |
264,20 → 259,19 |
* Insert it into DTLB. |
*/ |
t->a = true; |
dtlb_pte_copy(t, index, true); |
dtlb_pte_copy(t, true); |
#ifdef CONFIG_TSB |
dtsb_pte_copy(t, index, true); |
dtsb_pte_copy(t, true); |
#endif |
page_table_unlock(AS, true); |
} else { |
/* |
* Forward the page fault to the address space page fault |
* handler. |
* Forward the page fault to the address space page fault handler. |
*/ |
page_table_unlock(AS, true); |
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) { |
do_fast_data_access_mmu_miss_fault(istate, tag, |
__FUNCTION__); |
__FUNCTION__); |
} |
} |
} |
287,12 → 281,10 |
{ |
tlb_tag_access_reg_t tag; |
uintptr_t va; |
index_t index; |
pte_t *t; |
tag.value = dtlb_tag_access_read(); |
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE); |
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */ |
va = tag.vpn << PAGE_WIDTH; |
page_table_lock(AS, true); |
t = page_mapping_find(AS, va); |
304,11 → 296,10 |
*/ |
t->a = true; |
t->d = true; |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, |
va + index * MMU_PAGE_SIZE); |
dtlb_pte_copy(t, index, false); |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va); |
dtlb_pte_copy(t, false); |
#ifdef CONFIG_TSB |
dtsb_pte_copy(t, index, false); |
dtsb_pte_copy(t, false); |
#endif |
page_table_unlock(AS, true); |
} else { |
319,7 → 310,7 |
page_table_unlock(AS, true); |
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) { |
do_fast_data_access_protection_fault(istate, tag, |
__FUNCTION__); |
__FUNCTION__); |
} |
} |
} |
337,10 → 328,10 |
t.value = itlb_tag_read_read(i); |
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, " |
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, " |
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn, |
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, |
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g); |
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, " |
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn, |
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, |
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g); |
} |
printf("D-TLB contents:\n"); |
349,16 → 340,16 |
t.value = dtlb_tag_read_read(i); |
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, " |
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, " |
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn, |
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, |
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g); |
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, " |
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn, |
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag, |
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g); |
} |
} |
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, |
const char *str) |
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char |
*str) |
{ |
fault_if_from_uspace(istate, "%s\n", str); |
dump_istate(istate); |
365,29 → 356,29 |
panic("%s\n", str); |
} |
void do_fast_data_access_mmu_miss_fault(istate_t *istate, |
tlb_tag_access_reg_t tag, const char *str) |
void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t |
tag, const char *str) |
{ |
uintptr_t va; |
va = tag.vpn << MMU_PAGE_WIDTH; |
va = tag.vpn << PAGE_WIDTH; |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, |
tag.context); |
tag.context); |
dump_istate(istate); |
printf("Faulting page: %p, ASID=%d\n", va, tag.context); |
panic("%s\n", str); |
} |
void do_fast_data_access_protection_fault(istate_t *istate, |
tlb_tag_access_reg_t tag, const char *str) |
void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t |
tag, const char *str) |
{ |
uintptr_t va; |
va = tag.vpn << MMU_PAGE_WIDTH; |
va = tag.vpn << PAGE_WIDTH; |
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va, |
tag.context); |
tag.context); |
printf("Faulting page: %p, ASID=%d\n", va, tag.context); |
dump_istate(istate); |
panic("%s\n", str); |
402,8 → 393,8 |
sfar = dtlb_sfar_read(); |
printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, " |
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, |
sfsr.ow, sfsr.fv); |
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w, |
sfsr.ow, sfsr.fv); |
printf("DTLB SFAR: address=%p\n", sfar); |
dtlb_sfsr_write(0); |
490,11 → 481,11 |
ctx.context = asid; |
mmu_primary_context_write(ctx.v); |
for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) { |
for (i = 0; i < cnt; i++) { |
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, |
page + i * MMU_PAGE_SIZE); |
page + i * PAGE_SIZE); |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, |
page + i * MMU_PAGE_SIZE); |
page + i * PAGE_SIZE); |
} |
mmu_primary_context_write(pc_save.v); |
/trunk/kernel/arch/sparc64/src/mm/cache.c |
---|
0,0 → 1,157 |
/* |
* Copyright (c) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup sparc64mm |
* @{ |
*/ |
/** |
* @file |
* @brief D-cache shootdown algorithm. |
*/ |
#include <arch/mm/cache.h> |
#ifdef CONFIG_SMP |
#ifdef CONFIG_VIRT_IDX_DCACHE |
#include <smp/ipi.h> |
#include <arch/interrupt.h> |
#include <synch/spinlock.h> |
#include <arch.h> |
#include <debug.h> |
/** |
* This spinlock is used by the processors to synchronize during the D-cache |
* shootdown. |
*/ |
SPINLOCK_INITIALIZE(dcachelock); |
/** Initialize the D-cache shootdown sequence.
 *
 * Start the shootdown sequence by queueing an invalidation message on every
 * other CPU, sending out an IPI, and waiting until all processors spin on
 * the dcachelock spinlock. The caller must later call
 * dcache_shootdown_finalize() to release them.
 *
 * @param type Scope of the D-cache shootdown.
 * @param color Color to be invalidated; applicable only for DCACHE_INVL_COLOR
 * and DCACHE_INVL_FRAME invalidation types.
 * @param frame Frame to be invalidated; applicable only for DCACHE_INVL_FRAME
 * invalidation types.
 */
void dcache_shootdown_start(dcache_invalidate_type_t type, int color,
    uintptr_t frame)
{
	int i;

	/*
	 * Clear our own active flag first so that the busy wait at the
	 * bottom does not wait for this (initiating) CPU.
	 */
	CPU->arch.dcache_active = 0;
	spinlock_lock(&dcachelock);

	for (i = 0; i < config.cpu_count; i++) {
		cpu_t *cpu;

		/*
		 * Skip the initiating CPU.
		 * NOTE(review): assumes CPU ids are 0..cpu_count-1 and match
		 * the cpus[] index -- confirm against cpu initialization.
		 */
		if (i == CPU->id)
			continue;

		cpu = &cpus[i];
		spinlock_lock(&cpu->lock);
		if (cpu->arch.dcache_message_count ==
		    DCACHE_MSG_QUEUE_LEN) {
			/*
			 * The queue is full, flush the cache entirely.
			 * A single DCACHE_INVL_ALL message subsumes all
			 * previously queued finer-grained requests.
			 */
			cpu->arch.dcache_message_count = 1;
			cpu->arch.dcache_messages[0].type = DCACHE_INVL_ALL;
			cpu->arch.dcache_messages[0].color = 0;	/* ignored */
			cpu->arch.dcache_messages[0].frame = 0;	/* ignored */
		} else {
			index_t idx = cpu->arch.dcache_message_count++;
			cpu->arch.dcache_messages[idx].type = type;
			cpu->arch.dcache_messages[idx].color = color;
			cpu->arch.dcache_messages[idx].frame = frame;
		}
		spinlock_unlock(&cpu->lock);
	}

	ipi_broadcast(IPI_DCACHE_SHOOTDOWN);

	/*
	 * Busy wait until every other CPU has cleared its dcache_active
	 * flag, i.e. entered dcache_shootdown_ipi_recv() and parked on
	 * dcachelock.
	 */
busy_wait:
	for (i = 0; i < config.cpu_count; i++)
		if (cpus[i].arch.dcache_active)
			goto busy_wait;
}
/** Finish the D-cache shootdown sequence.
 *
 * Release dcachelock, letting the CPUs parked in
 * dcache_shootdown_ipi_recv() proceed with their queued invalidations,
 * and mark this CPU active again.
 */
void dcache_shootdown_finalize(void)
{
	spinlock_unlock(&dcachelock);
	CPU->arch.dcache_active = 1;
}
/** Process the D-cache shootdown IPI.
 *
 * Announce arrival by clearing dcache_active (observed by the initiator's
 * busy wait), rendezvous on dcachelock until the initiator calls
 * dcache_shootdown_finalize(), then drain this CPU's message queue,
 * performing the requested D-cache invalidations.
 */
void dcache_shootdown_ipi_recv(void)
{
	int i;

	ASSERT(CPU);

	CPU->arch.dcache_active = 0;
	/* Rendezvous: blocks here until the initiator releases dcachelock. */
	spinlock_lock(&dcachelock);
	spinlock_unlock(&dcachelock);

	spinlock_lock(&CPU->lock);
	/*
	 * NOTE(review): dcache_shootdown_start() lets the count reach
	 * exactly DCACHE_MSG_QUEUE_LEN before collapsing the queue, so this
	 * strict '<' assertion looks like it can fire on a full queue --
	 * confirm whether '<=' was intended.
	 */
	ASSERT(CPU->arch.dcache_message_count < DCACHE_MSG_QUEUE_LEN);
	for (i = 0; i < CPU->arch.dcache_message_count; i++) {
		switch (CPU->arch.dcache_messages[i].type) {
		case DCACHE_INVL_ALL:
			/* A full flush subsumes any remaining messages. */
			dcache_flush();
			goto flushed;
			break;	/* not reached */
		case DCACHE_INVL_COLOR:
			dcache_flush_color(CPU->arch.dcache_messages[i].color);
			break;
		case DCACHE_INVL_FRAME:
			dcache_flush_frame(CPU->arch.dcache_messages[i].color,
			    CPU->arch.dcache_messages[i].frame);
			break;
		default:
			panic("unknown type (%d)\n",
			    CPU->arch.dcache_messages[i].type);
		}
	}
flushed:
	/* Queue fully processed; reset it and rejoin normal operation. */
	CPU->arch.dcache_message_count = 0;
	spinlock_unlock(&CPU->lock);
	CPU->arch.dcache_active = 1;
}
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
#endif /* CONFIG_SMP */ |
/** @} |
*/ |
/trunk/kernel/arch/sparc64/src/mm/as.c |
---|
62,7 → 62,7 |
{ |
#ifdef CONFIG_TSB |
int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * |
sizeof(tsb_entry_t)) >> MMU_FRAME_WIDTH); |
sizeof(tsb_entry_t)) >> FRAME_WIDTH); |
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA); |
if (!tsb) |
71,8 → 71,8 |
as->arch.itsb = (tsb_entry_t *) tsb; |
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT * |
sizeof(tsb_entry_t)); |
memsetb((uintptr_t) as->arch.itsb, |
(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0); |
memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) |
* sizeof(tsb_entry_t), 0); |
#endif |
return 0; |
} |
81,7 → 81,7 |
{ |
#ifdef CONFIG_TSB |
count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * |
sizeof(tsb_entry_t)) >> MMU_FRAME_WIDTH; |
sizeof(tsb_entry_t)) >> FRAME_WIDTH; |
frame_free(KA2PA((uintptr_t) as->arch.itsb)); |
return cnt; |
#else |
139,7 → 139,7 |
uintptr_t tsb = (uintptr_t) as->arch.itsb; |
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { |
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { |
/* |
* TSBs were allocated from memory not covered |
* by the locked 4M kernel DTLB entry. We need |
158,9 → 158,9 |
tsb_base.size = TSB_SIZE; |
tsb_base.split = 0; |
tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH; |
tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH; |
itsb_base_write(tsb_base.value); |
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH; |
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH; |
dtsb_base_write(tsb_base.value); |
#endif |
} |
189,7 → 189,7 |
uintptr_t tsb = (uintptr_t) as->arch.itsb; |
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { |
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) { |
/* |
* TSBs were allocated from memory not covered |
* by the locked 4M kernel DTLB entry. We need |
/trunk/kernel/arch/sparc64/src/mm/tsb.c |
---|
34,7 → 34,6 |
#include <arch/mm/tsb.h> |
#include <arch/mm/tlb.h> |
#include <arch/mm/page.h> |
#include <arch/barrier.h> |
#include <mm/as.h> |
#include <arch/types.h> |
41,7 → 40,7 |
#include <macros.h> |
#include <debug.h> |
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1) |
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1) |
/** Invalidate portion of TSB. |
* |
60,31 → 59,28 |
ASSERT(as->arch.itsb && as->arch.dtsb); |
i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK; |
cnt = min(pages * MMU_PAGES_PER_PAGE, ITSB_ENTRY_COUNT); |
i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK; |
cnt = min(pages, ITSB_ENTRY_COUNT); |
for (i = 0; i < cnt; i++) { |
as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid = |
true; |
true; |
as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid = |
true; |
true; |
} |
} |
/** Copy software PTE to ITSB. |
* |
* @param t Software PTE. |
* @param index Zero if lower 8K-subpage, one if higher 8K subpage. |
* @param t Software PTE. |
*/ |
void itsb_pte_copy(pte_t *t, index_t index) |
void itsb_pte_copy(pte_t *t) |
{ |
as_t *as; |
tsb_entry_t *tsb; |
index_t entry; |
as = t->as; |
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK; |
tsb = &as->arch.itsb[entry]; |
tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK]; |
/* |
* We use write barriers to make sure that the TSB load |
99,11 → 95,10 |
write_barrier(); |
tsb->tag.context = as->asid; |
tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >> |
VA_TAG_PAGE_SHIFT; |
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT; |
tsb->data.value = 0; |
tsb->data.size = PAGESIZE_8K; |
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index; |
tsb->data.pfn = t->frame >> FRAME_WIDTH; |
tsb->data.cp = t->c; |
tsb->data.p = t->k; /* p as privileged */ |
tsb->data.v = t->p; |
115,19 → 110,16 |
/** Copy software PTE to DTSB. |
* |
* @param t Software PTE. |
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage. |
* @param ro If true, the mapping is copied read-only. |
* @param t Software PTE. |
* @param ro If true, the mapping is copied read-only. |
*/ |
void dtsb_pte_copy(pte_t *t, index_t index, bool ro) |
void dtsb_pte_copy(pte_t *t, bool ro) |
{ |
as_t *as; |
tsb_entry_t *tsb; |
index_t entry; |
as = t->as; |
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK; |
tsb = &as->arch.dtsb[entry]; |
tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK]; |
/* |
* We use write barriers to make sure that the TSB load |
142,11 → 134,10 |
write_barrier(); |
tsb->tag.context = as->asid; |
tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >> |
VA_TAG_PAGE_SHIFT; |
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT; |
tsb->data.value = 0; |
tsb->data.size = PAGESIZE_8K; |
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index; |
tsb->data.pfn = t->frame >> FRAME_WIDTH; |
tsb->data.cp = t->c; |
#ifdef CONFIG_VIRT_IDX_DCACHE |
tsb->data.cv = t->c; |
/trunk/kernel/arch/sparc64/src/mm/cache_asm.S |
---|
0,0 → 1,91 |
/* |
* Copyright (c) 2006 Jakub Jermar |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
#include <arch/arch.h> |
#define DCACHE_SIZE (16 * 1024) |
#define DCACHE_LINE_SIZE 32 |
#define DCACHE_TAG_SHIFT 2 |
.register %g2, #scratch |
.register %g3, #scratch |
/** Flush the whole D-cache.
 *
 * Walks the D-cache line by line from the last line down to line 0,
 * writing zero to each line's tag via the diagnostic ASI_DCACHE_TAG
 * access, which invalidates the line. Clobbers %g1.
 */
.global dcache_flush
dcache_flush:
	set (DCACHE_SIZE - DCACHE_LINE_SIZE), %g1	! %g1 = offset of last line
	stxa %g0, [%g1] ASI_DCACHE_TAG			! invalidate last line
0:	membar #Sync
	subcc %g1, DCACHE_LINE_SIZE, %g1		! step down one line
	bnz,pt %xcc, 0b
	stxa %g0, [%g1] ASI_DCACHE_TAG			! delay slot: invalidate current line
	retl
	membar #Sync					! delay slot: order the final store
/** Flush only D-cache lines of one virtual color.
 *
 * Iterates over the half of the D-cache belonging to the given virtual
 * color, invalidating each line's tag via ASI_DCACHE_TAG.
 * Clobbers %g1 (loop counter) and %g2 (line offset).
 *
 * @param o0 Virtual color to be flushed.
 */
.global dcache_flush_color
dcache_flush_color:
	mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1	! %g1 = lines per color
	set DCACHE_SIZE / 2, %g2			! %g2 = size of one color half
	sllx %g2, %o0, %g2				! select the color's half
	sub %g2, DCACHE_LINE_SIZE, %g2			! %g2 = last line of that half
0:	stxa %g0, [%g2] ASI_DCACHE_TAG			! invalidate the line
	membar #Sync
	subcc %g1, 1, %g1
	bnz,pt %xcc, 0b
	sub %g2, DCACHE_LINE_SIZE, %g2			! delay slot: step down one line
	retl
	nop
/** Flush only D-cache lines of one virtual color and one tag.
 *
 * Iterates over the half of the D-cache belonging to the given virtual
 * color; for each line, reads its tag via ASI_DCACHE_TAG and invalidates
 * the line only if the tag matches the requested one.
 * Clobbers %g1 (loop counter), %g2 (line offset), %g3 (tag scratch).
 *
 * @param o0 Virtual color to lookup the tag.
 * @param o1 Tag of the cachelines to be flushed.
 */
.global dcache_flush_tag
dcache_flush_tag:
	mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1	! %g1 = lines per color
	set DCACHE_SIZE / 2, %g2			! %g2 = size of one color half
	sllx %g2, %o0, %g2				! select the color's half
	sub %g2, DCACHE_LINE_SIZE, %g2			! %g2 = last line of that half
0:	ldxa [%g2] ASI_DCACHE_TAG, %g3			! read the line's tag
	srlx %g3, DCACHE_TAG_SHIFT, %g3
	cmp %g3, %o1
	bnz 1f						! skip lines with a different tag
	nop
	stxa %g0, [%g2] ASI_DCACHE_TAG			! tag matches: invalidate the line
	membar #Sync
1:	subcc %g1, 1, %g1
	bnz,pt %xcc, 0b
	sub %g2, DCACHE_LINE_SIZE, %g2			! delay slot: step down one line
	retl
	nop
/trunk/kernel/arch/sparc64/src/mm/page.c |
---|
73,9 → 73,9 |
*/ |
for (i = 0; i < bsp_locked_dtlb_entries; i++) { |
dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page, |
bsp_locked_dtlb_entry[i].phys_page, |
bsp_locked_dtlb_entry[i].pagesize_code, true, |
false); |
bsp_locked_dtlb_entry[i].phys_page, |
bsp_locked_dtlb_entry[i].pagesize_code, true, |
false); |
} |
#endif |
107,26 → 107,26 |
size_t increment; |
count_t count; |
} sizemap[] = { |
{ PAGESIZE_8K, 0, 1 }, /* 8K */ |
{ PAGESIZE_8K, MMU_PAGE_SIZE, 2 }, /* 16K */ |
{ PAGESIZE_8K, MMU_PAGE_SIZE, 4 }, /* 32K */ |
{ PAGESIZE_64K, 0, 1}, /* 64K */ |
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 }, /* 128K */ |
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 }, /* 256K */ |
{ PAGESIZE_512K, 0, 1 }, /* 512K */ |
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 }, /* 1M */ |
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 }, /* 2M */ |
{ PAGESIZE_4M, 0, 1 }, /* 4M */ |
{ PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 } /* 8M */ |
{ PAGESIZE_8K, 0, 1 }, /* 8K */ |
{ PAGESIZE_8K, PAGE_SIZE, 2 }, /* 16K */ |
{ PAGESIZE_8K, PAGE_SIZE, 4 }, /* 32K */ |
{ PAGESIZE_64K, 0, 1}, /* 64K */ |
{ PAGESIZE_64K, 8 * PAGE_SIZE, 2 }, /* 128K */ |
{ PAGESIZE_64K, 8 * PAGE_SIZE, 4 }, /* 256K */ |
{ PAGESIZE_512K, 0, 1 }, /* 512K */ |
{ PAGESIZE_512K, 64 * PAGE_SIZE, 2 }, /* 1M */ |
{ PAGESIZE_512K, 64 * PAGE_SIZE, 4 }, /* 2M */ |
{ PAGESIZE_4M, 0, 1 }, /* 4M */ |
{ PAGESIZE_4M, 512 * PAGE_SIZE, 2 } /* 8M */ |
}; |
ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr); |
ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr); |
ASSERT(size <= 8 * 1024 * 1024); |
if (size <= MMU_FRAME_SIZE) |
if (size <= FRAME_SIZE) |
order = 0; |
else |
order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH; |
order = (fnzb64(size - 1) + 1) - FRAME_WIDTH; |
/* |
* Use virtual addresses that are beyond the limit of physical memory. |
134,10 → 134,8 |
* by frame_alloc(). |
*/ |
ASSERT(PA2KA(last_frame)); |
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame), |
1 << (order + FRAME_WIDTH)); |
last_frame = ALIGN_UP(KA2PA(virtaddr) + size, |
1 << (order + FRAME_WIDTH)); |
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame), 1 << (order + FRAME_WIDTH)); |
last_frame = ALIGN_UP(KA2PA(virtaddr) + size, 1 << (order + FRAME_WIDTH)); |
for (i = 0; i < sizemap[order].count; i++) { |
/* |
144,8 → 142,8 |
* First, insert the mapping into DTLB. |
*/ |
dtlb_insert_mapping(virtaddr + i * sizemap[order].increment, |
physaddr + i * sizemap[order].increment, |
sizemap[order].pagesize_code, true, false); |
physaddr + i * sizemap[order].increment, |
sizemap[order].pagesize_code, true, false); |
#ifdef CONFIG_SMP |
/* |
152,11 → 150,11 |
* Second, save the information about the mapping for APs. |
*/ |
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page = |
virtaddr + i * sizemap[order].increment; |
virtaddr + i * sizemap[order].increment; |
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page = |
physaddr + i * sizemap[order].increment; |
physaddr + i * sizemap[order].increment; |
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code = |
sizemap[order].pagesize_code; |
sizemap[order].pagesize_code; |
bsp_locked_dtlb_entries++; |
#endif |
} |
/trunk/kernel/arch/sparc64/Makefile.inc |
---|
83,7 → 83,8 |
arch/$(ARCH)/src/fpu_context.c \ |
arch/$(ARCH)/src/dummy.s \ |
arch/$(ARCH)/src/mm/as.c \ |
arch/$(ARCH)/src/mm/cache.S \ |
arch/$(ARCH)/src/mm/cache.c \ |
arch/$(ARCH)/src/mm/cache_asm.S \ |
arch/$(ARCH)/src/mm/frame.c \ |
arch/$(ARCH)/src/mm/page.c \ |
arch/$(ARCH)/src/mm/tlb.c \ |