Subversion Repositories HelenOS

Compare Revisions

Rev 2140 → Rev 2141

/trunk/kernel/arch/sparc64/include/stack.h
43,7 → 43,7
/**
* 16-extended-word save area for %i[0-7] and %l[0-7] registers.
*/
#define STACK_WINDOW_SAVE_AREA_SIZE (16*STACK_ITEM_SIZE)
#define STACK_WINDOW_SAVE_AREA_SIZE (16 * STACK_ITEM_SIZE)
 
/**
* By convention, the actual top of the stack is %sp + STACK_BIAS.
/trunk/kernel/arch/sparc64/include/mm/frame.h
35,7 → 35,20
#ifndef KERN_sparc64_FRAME_H_
#define KERN_sparc64_FRAME_H_
 
#define FRAME_WIDTH 13 /* 8K */
/*
* Page size supported by the MMU.
* For 8K there is the nasty illegal virtual aliasing problem.
* Therefore, the kernel uses 8K only internally on the TLB and TSB levels.
*/
#define MMU_FRAME_WIDTH 13 /* 8K */
#define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH)
 
/*
* Page size exported to the generic memory management subsystems.
* This page size is not directly supported by the MMU, but we can emulate
* each 16K page with a pair of adjacent 8K pages.
*/
#define FRAME_WIDTH 14 /* 16K */
#define FRAME_SIZE (1 << FRAME_WIDTH)
 
#ifdef KERNEL
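
The point of this split: the MMU keeps working in 8K frames (its hardware page size), while the generic memory subsystems are told the frame is 16K, so every generic frame is backed by a pair of adjacent MMU frames. A minimal sketch of the relationship, using only the constants above (mmu_frame_of() is a hypothetical helper, not kernel API):

/* Physical address of the n-th (0 or 1) 8K MMU frame backing a
 * 16K generic frame. Note FRAME_SIZE / MMU_FRAME_SIZE == 2. */
static inline uintptr_t mmu_frame_of(uintptr_t frame, unsigned int n)
{
	return frame + (n << MMU_FRAME_WIDTH);
}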
/trunk/kernel/arch/sparc64/include/mm/page.h
37,11 → 37,27
 
#include <arch/mm/frame.h>
 
/*
* On the TLB and TSB level, we still use 8K pages, which are supported by the
* MMU.
*/
#define MMU_PAGE_WIDTH MMU_FRAME_WIDTH
#define MMU_PAGE_SIZE MMU_FRAME_SIZE
 
/*
* On the page table level, we use 16K pages. 16K pages are not supported by
* the MMU but we emulate them with pairs of 8K pages.
*/
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 1 /**< 14 - 13; 2^14 == 16K == alias boundary. */
#define MMU_PAGES_PER_PAGE (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH))
 
/*
* With 16K pages, there is only one page color.
*/
#define PAGE_COLOR_BITS 0 /**< 14 - 14; 2^14 == 16K == alias boundary. */
 
#ifdef KERNEL
 
#ifndef __ASM__
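
This is also why the aliasing problem disappears. The UltraSPARC D-cache is 16K and virtually indexed; with 8K pages, virtual-address bit 13 selects the cache half, giving two page colors, and two mappings of one frame with different colors are illegal aliases. With 16K pages the whole cache index lies inside the page offset, so only one color exists and no alias can form. A hedged illustration (page_color() is a made-up helper):

static inline int page_color(uintptr_t va, int page_width)
{
	/* Bits between the page offset and the 16K alias boundary. */
	return (int) ((va >> page_width) & ((1 << (14 - page_width)) - 1));
}

/* page_color(va, 13): 0 or 1 -- two colors with 8K pages.
 * page_color(va, 14): always 0 -- one color with 16K pages. */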
/trunk/kernel/arch/sparc64/include/mm/as.h
81,10 → 81,11
#include <genarch/mm/as_ht.h>
 
#ifdef CONFIG_TSB
# include <arch/mm/tsb.h>
# define as_invalidate_translation_cache(as, page, cnt) tsb_invalidate(as, page, cnt)
#include <arch/mm/tsb.h>
#define as_invalidate_translation_cache(as, page, cnt) \
tsb_invalidate((as), (page), (cnt))
#else
# define as_invalidate_translation_cache(as, page, cnt)
#define as_invalidate_translation_cache(as, page, cnt)
#endif
 
extern void as_arch_init(void);
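
Besides being reflowed, the macro now parenthesizes its arguments, which is routine macro hygiene. A generic sketch of the failure mode this guards against (f() and the SCALE macros are made-up names, unrelated to this code):

#define SCALE_BAD(x)	f(x * 2)
#define SCALE_GOOD(x)	f((x) * 2)

/* SCALE_BAD(a + 1)  expands to f(a + 1 * 2)   -- wrong
 * SCALE_GOOD(a + 1) expands to f((a + 1) * 2) -- as intended */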
/trunk/kernel/arch/sparc64/include/mm/cache.h
43,44 → 43,10
#define dcache_flush_frame(p, f) \
dcache_flush_tag(PAGE_COLOR((p)), ADDR2PFN((f)));
 
/**
* Enumerations to differentiate among different scopes of D-Cache
* invalidation.
*/
typedef enum {
DCACHE_INVL_INVALID,
DCACHE_INVL_ALL,
DCACHE_INVL_COLOR,
DCACHE_INVL_FRAME
} dcache_invalidate_type_t;
 
/**
* Number of messages that can be queued in the cpu_arch_t structure at a time.
*/
#define DCACHE_MSG_QUEUE_LEN 10
 
/** D-cache shootdown message type. */
typedef struct {
dcache_invalidate_type_t type;
int color;
uintptr_t frame;
} dcache_shootdown_msg_t;
 
extern void dcache_flush(void);
extern void dcache_flush_color(int c);
extern void dcache_flush_tag(int c, pfn_t tag);
 
#ifdef CONFIG_SMP
extern void dcache_shootdown_start(dcache_invalidate_type_t type, int color,
uintptr_t frame);
extern void dcache_shootdown_finalize(void);
extern void dcache_shootdown_ipi_recv(void);
#else
#define dcache_shootdown_start(t, c, f)
#define dcache_shootdown_finalize()
#define dcache_shootdown_ipi_recv()
#endif /* CONFIG_SMP */
 
#endif
 
/** @}
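
The shootdown machinery can go because, with a single page color (see page.h above), illegal virtual aliases can no longer arise, so there is nothing to invalidate on other CPUs; only the CPU-local flush primitives survive. For reference, the surviving macro above expands as:

/* dcache_flush_frame(p, f) becomes: */
dcache_flush_tag(PAGE_COLOR((p)), ADDR2PFN((f)));
/* i.e., invalidate every D-cache line of the page's color whose tag
 * matches frame f -- a purely local operation, no IPIs needed. */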
/trunk/kernel/arch/sparc64/include/mm/tsb.h
112,8 → 112,8
struct pte;
 
extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);
extern void itsb_pte_copy(struct pte *t);
extern void dtsb_pte_copy(struct pte *t, bool ro);
extern void itsb_pte_copy(struct pte *t, index_t index);
extern void dtsb_pte_copy(struct pte *t, index_t index, bool ro);
 
#endif /* !def __ASM__ */
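
The new index argument selects which 8K half of a 16K page a TSB entry describes: 0 for the lower subpage, 1 for the upper. A hypothetical call site copying both halves of one mapping (the real callers in tlb.c below pass only the single faulting subpage):

index_t i;

for (i = 0; i < MMU_PAGES_PER_PAGE; i++)
	dtsb_pte_copy(t, i, false);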
 
/trunk/kernel/arch/sparc64/include/cpu.h
64,11 → 64,6
uint64_t next_tick_cmpr; /**< Next clock interrupt should be
generated when the TICK register
matches this value. */
#ifdef CONFIG_SMP
int dcache_active;
dcache_shootdown_msg_t dcache_messages[DCACHE_MSG_QUEUE_LEN];
count_t dcache_message_count;
#endif
} cpu_arch_t;
#endif
/trunk/kernel/arch/sparc64/Makefile.inc
83,8 → 83,7
arch/$(ARCH)/src/fpu_context.c \
arch/$(ARCH)/src/dummy.s \
arch/$(ARCH)/src/mm/as.c \
arch/$(ARCH)/src/mm/cache.c \
arch/$(ARCH)/src/mm/cache_asm.S \
arch/$(ARCH)/src/mm/cache.S \
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c \
/trunk/kernel/arch/sparc64/src/smp/ipi.c
39,7 → 39,6
#include <arch/asm.h>
#include <config.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <arch/interrupt.h>
#include <arch/trap/interrupt.h>
#include <arch/barrier.h>
125,11 → 124,6
case IPI_TLB_SHOOTDOWN:
func = tlb_shootdown_ipi_recv;
break;
#if (defined(CONFIG_SMP) && (defined(CONFIG_VIRT_IDX_DCACHE)))
case IPI_DCACHE_SHOOTDOWN:
func = dcache_shootdown_ipi_recv;
break;
#endif
default:
panic("Unknown IPI (%d).\n", ipi);
break;
/trunk/kernel/arch/sparc64/src/trap/interrupt.c
44,7 → 44,6
#include <print.h>
#include <arch.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <config.h>
#include <synch/spinlock.h>
 
91,10 → 90,6
#ifdef CONFIG_SMP
if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
tlb_shootdown_ipi_recv();
#ifdef CONFIG_VIRT_IDX_DCACHE
} else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) {
dcache_shootdown_ipi_recv();
#endif
}
#endif
} else {
/trunk/kernel/arch/sparc64/src/cpu/cpu.c
51,11 → 51,6
upa_config.value = upa_config_read();
CPU->arch.mid = upa_config.mid;
#if (defined(CONFIG_SMP) && defined(CONFIG_VIRT_IDX_DCACHE))
CPU->arch.dcache_active = 1;
CPU->arch.dcache_message_count = 0;
#endif
 
/*
* Detect processor frequency.
*/
/trunk/kernel/arch/sparc64/src/mm/cache_asm.S
File deleted
/trunk/kernel/arch/sparc64/src/mm/cache.c
File deleted
/trunk/kernel/arch/sparc64/src/mm/tlb.c
54,14 → 54,14
#include <arch/mm/tsb.h>
#endif
 
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const
char *str);
static void dtlb_pte_copy(pte_t *t, index_t index, bool ro);
static void itlb_pte_copy(pte_t *t, index_t index);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
tlb_tag_access_reg_t tag, const char *str);
 
char *context_encoding[] = {
"Primary",
92,8 → 92,8
* @param locked True for permanent mappings, false otherwise.
* @param cacheable True if the mapping is cacheable, false otherwise.
*/
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool
locked, bool cacheable)
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
bool locked, bool cacheable)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
126,11 → 126,12
 
/** Copy PTE to TLB.
*
* @param t Page Table Entry to be copied.
* @param ro If true, the entry will be created read-only, regardless of its w
* field.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the entry will be created read-only, regardless of its
* w field.
*/
void dtlb_pte_copy(pte_t *t, bool ro)
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
137,15 → 138,15
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
pg.address = t->page + (index << MMU_PAGE_WIDTH);
fr.address = t->frame + (index << MMU_PAGE_WIDTH);
 
tag.value = 0;
tag.context = t->as->asid;
tag.vpn = pg.vpn;
 
dtlb_tag_access_write(tag.value);
 
data.value = 0;
data.v = true;
data.size = PAGESIZE_8K;
158,15 → 159,16
data.p = t->k; /* p like privileged */
data.w = ro ? false : t->w;
data.g = t->g;
 
dtlb_data_in_write(data.value);
}
 
/** Copy PTE to ITLB.
*
* @param t Page Table Entry to be copied.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itlb_pte_copy(pte_t *t)
void itlb_pte_copy(pte_t *t, index_t index)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
173,8 → 175,8
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
pg.address = t->page + (index << MMU_PAGE_WIDTH);
fr.address = t->frame + (index << MMU_PAGE_WIDTH);
 
tag.value = 0;
tag.context = t->as->asid;
199,6 → 201,7
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
{
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
pte_t *t;
 
page_table_lock(AS, true);
209,9 → 212,9
* Insert it into ITLB.
*/
t->a = true;
itlb_pte_copy(t);
itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
itsb_pte_copy(t);
itsb_pte_copy(t, index);
#endif
page_table_unlock(AS, true);
} else {
222,7 → 225,7
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
do_fast_instruction_access_mmu_miss_fault(istate,
__FUNCTION__);
__FUNCTION__);
}
}
}
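
A worked example of the new index computation, with MMU_PAGE_WIDTH 13 and MMU_PAGES_PER_PAGE 2 (the tpc value is made up):

/* istate->tpc = 0x40003e08
 * va    = ALIGN_DOWN(0x40003e08, 16K) = 0x40000000
 * index = (0x40003e08 >> 13) % 2 = 0x20001 % 2 = 1
 * -> the miss hit the upper 8K half, so itlb_pte_copy() installs the
 *    8K entry for va + 8K, backed by t->frame + 8K. */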
236,19 → 239,21
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
va = tag.vpn << PAGE_WIDTH;
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE;
 
if (tag.context == ASID_KERNEL) {
if (!tag.vpn) {
/* NULL access in kernel */
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
"kernel page fault.");
"kernel page fault.");
}
 
page_table_lock(AS, true);
259,19 → 264,20
* Insert it into DTLB.
*/
t->a = true;
dtlb_pte_copy(t, true);
dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, true);
dtsb_pte_copy(t, index, true);
#endif
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to the address space page fault handler.
* Forward the page fault to the address space page fault
* handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
}
}
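
Here tag.vpn is an 8K virtual page number read from the TLB tag access register, so the 16K-aligned va and the subpage index both fall out of it directly; the (uint64_t) cast widens vpn before the shift so high address bits are not truncated. A worked example (vpn made up):

/* tag.vpn = 0x20001
 * va    = ALIGN_DOWN((uint64_t) 0x20001 << 13, 16K)
 *       = ALIGN_DOWN(0x40002000, 0x4000) = 0x40000000
 * index = 0x20001 % 2 = 1 */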
281,10 → 287,12
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
va = tag.vpn << PAGE_WIDTH;
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
296,10 → 304,11
*/
t->a = true;
t->d = true;
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
dtlb_pte_copy(t, false);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
va + index * MMU_PAGE_SIZE);
dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, false);
dtsb_pte_copy(t, index, false);
#endif
page_table_unlock(AS, true);
} else {
310,7 → 319,7
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
do_fast_data_access_protection_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
}
}
328,10 → 337,10
t.value = itlb_tag_read_read(i);
 
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
printf("D-TLB contents:\n");
340,16 → 349,16
t.value = dtlb_tag_read_read(i);
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
}
 
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char
*str)
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str)
{
fault_if_from_uspace(istate, "%s\n", str);
dump_istate(istate);
356,29 → 365,29
panic("%s\n", str);
}
 
void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
va = tag.vpn << MMU_PAGE_WIDTH;
 
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
tag.context);
dump_istate(istate);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
panic("%s\n", str);
}
 
void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
va = tag.vpn << MMU_PAGE_WIDTH;
 
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
tag.context);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
dump_istate(istate);
panic("%s\n", str);
393,8 → 402,8
sfar = dtlb_sfar_read();
printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
printf("DTLB SFAR: address=%p\n", sfar);
dtlb_sfsr_write(0);
481,11 → 490,11
ctx.context = asid;
mmu_primary_context_write(ctx.v);
for (i = 0; i < cnt; i++) {
for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * PAGE_SIZE);
page + i * MMU_PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * PAGE_SIZE);
page + i * MMU_PAGE_SIZE);
}
mmu_primary_context_write(pc_save.v);
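
Because every generic page is now two 8K TLB entries, invalidating cnt pages demaps cnt * MMU_PAGES_PER_PAGE entries. For example:

/* cnt = 3 (three 16K pages) -> 6 iterations, demapping
 * page + 0*8K, page + 1*8K, ..., page + 5*8K
 * from both the I-TLB and the D-TLB. */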
/trunk/kernel/arch/sparc64/src/mm/as.c
62,7 → 62,7
{
#ifdef CONFIG_TSB
int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH);
sizeof(tsb_entry_t)) >> MMU_FRAME_WIDTH);
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
 
if (!tsb)
71,8 → 71,8
as->arch.itsb = (tsb_entry_t *) tsb;
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
sizeof(tsb_entry_t));
memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
* sizeof(tsb_entry_t), 0);
memsetb((uintptr_t) as->arch.itsb,
(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
return 0;
}
81,7 → 81,7
{
#ifdef CONFIG_TSB
count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH;
sizeof(tsb_entry_t)) >> MMU_FRAME_WIDTH;
frame_free(KA2PA((uintptr_t) as->arch.itsb));
return cnt;
#else
139,7 → 139,7
 
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
158,9 → 158,9
tsb_base.size = TSB_SIZE;
tsb_base.split = 0;
 
tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
itsb_base_write(tsb_base.value);
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
dtsb_base_write(tsb_base.value);
#endif
}
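
A worked example of the TSB sizing arithmetic, assuming ITSB_ENTRY_COUNT == DTSB_ENTRY_COUNT == 512 and sizeof(tsb_entry_t) == 16 (neither value is visible in this diff):

/* (512 + 512) * 16 = 16384 bytes
 * 16384 >> MMU_FRAME_WIDTH = 16384 >> 13 = 2
 * order = fnzb32(2) = 1 */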
189,7 → 189,7
 
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
/trunk/kernel/arch/sparc64/src/mm/cache.S
0,0 → 1,91
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/arch.h>
 
#define DCACHE_SIZE (16 * 1024)
#define DCACHE_LINE_SIZE 32
 
#define DCACHE_TAG_SHIFT 2
 
.register %g2, #scratch
.register %g3, #scratch
 
/** Flush the whole D-cache. */
.global dcache_flush
dcache_flush:
set (DCACHE_SIZE - DCACHE_LINE_SIZE), %g1
stxa %g0, [%g1] ASI_DCACHE_TAG
0: membar #Sync
subcc %g1, DCACHE_LINE_SIZE, %g1
bnz,pt %xcc, 0b
stxa %g0, [%g1] ASI_DCACHE_TAG
retl
membar #Sync
 
/** Flush only D-cache lines of one virtual color.
*
* @param o0 Virtual color to be flushed.
*/
.global dcache_flush_color
dcache_flush_color:
mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1
set DCACHE_SIZE / 2, %g2
sllx %g2, %o0, %g2
sub %g2, DCACHE_LINE_SIZE, %g2
0: stxa %g0, [%g2] ASI_DCACHE_TAG
membar #Sync
subcc %g1, 1, %g1
bnz,pt %xcc, 0b
sub %g2, DCACHE_LINE_SIZE, %g2
retl
nop
 
/** Flush only D-cache lines of one virtual color and one tag.
*
* @param o0 Virtual color to lookup the tag.
* @param o1 Tag of the cachelines to be flushed.
*/
.global dcache_flush_tag
dcache_flush_tag:
mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1
set DCACHE_SIZE / 2, %g2
sllx %g2, %o0, %g2
sub %g2, DCACHE_LINE_SIZE, %g2
0: ldxa [%g2] ASI_DCACHE_TAG, %g3
srlx %g3, DCACHE_TAG_SHIFT, %g3
cmp %g3, %o1
bnz 1f
nop
stxa %g0, [%g2] ASI_DCACHE_TAG
membar #Sync
1: subcc %g1, 1, %g1
bnz,pt %xcc, 0b
sub %g2, DCACHE_LINE_SIZE, %g2
retl
nop
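
For readability, a C-level model of dcache_flush_color above; dcache_tag_write() stands in for the stxa to ASI_DCACHE_TAG and is hypothetical:

static void dcache_flush_color_model(int color)
{
	uintptr_t line;

	/* Each color owns one 8K half of the 16K direct-mapped cache;
	 * writing a line's tag invalidates the line. The assembly walks
	 * the same range downwards. */
	for (line = color * (DCACHE_SIZE / 2);
	    line < (uintptr_t) (color + 1) * (DCACHE_SIZE / 2);
	    line += DCACHE_LINE_SIZE)
		dcache_tag_write(line, 0);
}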
/trunk/kernel/arch/sparc64/src/mm/tsb.c
34,6 → 34,7
 
#include <arch/mm/tsb.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <mm/as.h>
#include <arch/types.h>
40,7 → 41,7
#include <macros.h>
#include <debug.h>
 
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
 
/** Invalidate portion of TSB.
*
59,28 → 60,31
ASSERT(as->arch.itsb && as->arch.dtsb);
i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK;
cnt = min(pages, ITSB_ENTRY_COUNT);
i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
cnt = min(pages * MMU_PAGES_PER_PAGE, ITSB_ENTRY_COUNT);
for (i = 0; i < cnt; i++) {
as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
true;
true;
as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
true;
true;
}
}
 
/** Copy software PTE to ITSB.
*
* @param t Software PTE.
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itsb_pte_copy(pte_t *t)
void itsb_pte_copy(pte_t *t, index_t index)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
as = t->as;
tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
tsb = &as->arch.itsb[entry];
 
/*
* We use write barriers to make sure that the TSB load
95,10 → 99,11
write_barrier();
 
tsb->tag.context = as->asid;
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
tsb->data.p = t->k; /* p as privileged */
tsb->data.v = t->p;
110,16 → 115,19
 
/** Copy software PTE to DTSB.
*
* @param t Software PTE.
* @param ro If true, the mapping is copied read-only.
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the mapping is copied read-only.
*/
void dtsb_pte_copy(pte_t *t, bool ro)
void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
as = t->as;
tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
tsb = &as->arch.dtsb[entry];
 
/*
* We use write barriers to make sure that the TSB load
134,10 → 142,11
write_barrier();
 
tsb->tag.context = as->asid;
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
tsb->data.cv = t->c;
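
A worked sketch of the new TSB indexing, assuming TSB_SIZE 0, so that TSB_INDEX_MASK = (1 << (21 + 1 + 0 - 13)) - 1 = 511 (512-entry TSBs):

/* t->page = 0x40000000:
 * (0x40000000 >> 13) = 0x20000
 * entry for index 0  = (0x20000 + 0) & 511 = 0
 * entry for index 1  = (0x20000 + 1) & 511 = 1
 * -> the two 8K halves of a 16K page occupy adjacent TSB slots, and
 *    data.pfn = (t->frame >> 13) + index names the matching frame half. */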
/trunk/kernel/arch/sparc64/src/mm/page.c
73,9 → 73,9
*/
for (i = 0; i < bsp_locked_dtlb_entries; i++) {
dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
}
#endif
 
107,26 → 107,26
size_t increment;
count_t count;
} sizemap[] = {
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * PAGE_SIZE, 2 } /* 8M */
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 } /* 8M */
};
ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
ASSERT(size <= 8 * 1024 * 1024);
if (size <= FRAME_SIZE)
if (size <= MMU_FRAME_SIZE)
order = 0;
else
order = (fnzb64(size - 1) + 1) - FRAME_WIDTH;
order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;
 
/*
* Use virtual addresses that are beyond the limit of physical memory.
134,8 → 134,10
* by frame_alloc().
*/
ASSERT(PA2KA(last_frame));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame), 1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size, 1 << (order + FRAME_WIDTH));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
1 << (order + FRAME_WIDTH));
for (i = 0; i < sizemap[order].count; i++) {
/*
142,8 → 144,8
* First, insert the mapping into DTLB.
*/
dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
#ifdef CONFIG_SMP
/*
150,11 → 152,11
* Second, save the information about the mapping for APs.
*/
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
virtaddr + i * sizemap[order].increment;
virtaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
physaddr + i * sizemap[order].increment;
physaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
sizemap[order].pagesize_code;
sizemap[order].pagesize_code;
bsp_locked_dtlb_entries++;
#endif
}
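
A worked example of the updated arithmetic for mapping a 2M region (constants as defined above):

/* size = 2M:
 * order = (fnzb64(2M - 1) + 1) - MMU_FRAME_WIDTH = (20 + 1) - 13 = 8
 * sizemap[8] = { PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 }
 * -> four locked 512K DTLB entries, each 64 * 8K = 512K apart,
 *    together covering the whole 2M region. */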