Subversion Repositories HelenOS

Compare Revisions

Rev 2008 → Rev 2009

/trunk/kernel/kernel.config
97,8 → 97,8
# Support for NS16550 serial port
! [ARCH=sparc64] CONFIG_NS16550 (y/n)
 
# Virtually indexed cache support
! [ARCH=sparc64] CONFIG_VIRT_IDX_CACHE (n/y)
# Virtually indexed D-cache support
! [ARCH=sparc64] CONFIG_VIRT_IDX_DCACHE (y/n)
 
 
## Debugging configuration directives
/trunk/kernel/generic/include/mm/as.h
94,6 → 94,11
/** Address space identifier. Constant on architectures that do not support ASIDs.*/
asid_t asid;
#ifdef CONFIG_VIRT_IDX_DCACHE
/** When true, the D-cache must be flushed when this address space is installed. */
bool dcache_flush_on_install;
/** When true, the D-cache must be flushed when this address space is deinstalled. */
bool dcache_flush_on_deinstall;
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/** Architecture specific content. */
as_arch_t arch;
};
160,6 → 165,12
 
/** Data to be used by the backend. */
mem_backend_data_t backend_data;
 
/**
* Virtual color of the original address space area that was at the beginning
* of the share chain.
*/
int orig_color;
};
 
extern as_t *AS_KERNEL;
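
The new orig_color field records the virtual color of the area at the head of a share chain. On UltraSPARC the D-cache is 16 KB, direct-mapped and virtually indexed while the base page is 8 KB, so a single virtual-address bit selects which half of the cache a page maps to; that bit is the page's color. A minimal sketch of such a color computation, assuming exactly those sizes (the real PAGE_COLOR macro lives in the sparc64 headers and may differ):

/* Sketch only; the real PAGE_COLOR macro is architecture-defined.
 * Assumes 8 KB base pages and a 16 KB direct-mapped, virtually indexed
 * D-cache, i.e. a single color bit (virtual address bit 13). */
#define PAGE_WIDTH_SKETCH      13
#define DCACHE_WAY_WIDTH       14
#define PAGE_COLOR_SKETCH(va) \
	(((uintptr_t) (va) >> PAGE_WIDTH_SKETCH) & \
	    ((1 << (DCACHE_WAY_WIDTH - PAGE_WIDTH_SKETCH)) - 1))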
/trunk/kernel/generic/src/mm/as.c
78,6 → 78,10
#include <syscall/copy.h>
#include <arch/interrupt.h>
 
#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/**
* Each architecture decides what functions will be used to carry out
* address space operations such as creating or locking page tables.
162,6 → 166,11
as->cpu_refcount = 0;
as->page_table = page_table_create(flags);
 
#ifdef CONFIG_VIRT_IDX_DCACHE
as->dcache_flush_on_install = false;
as->dcache_flush_on_deinstall = false;
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
return as;
}
 
269,6 → 278,18
else
memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
 
#ifdef CONFIG_VIRT_IDX_DCACHE
/*
* When the area is being created with the AS_AREA_ATTR_PARTIAL flag, the
* orig_color is probably wrong until the flag is reset. In other words, it is
* initialized with the color of the area being created and not with the color
* of the original address space area at the beginning of the share chain. Of
* course, the correct color is set by as_area_share() before the flag is
* reset.
*/
a->orig_color = PAGE_COLOR(base);
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
btree_create(&a->used_space);
btree_insert(&as->as_area_btree, base, (void *) a, NULL);
554,9 → 575,7
* such address space area, EPERM if there was a problem in accepting the area
* or ENOMEM if there was a problem in allocating destination address space
* area. ENOTSUP is returned if the address space area backend does not support
* sharing. It can be also returned if the architecture uses virtually indexed
* caches and the source and destination areas start at pages with different
* page colors.
* sharing.
*/
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
564,6 → 583,7
ipl_t ipl;
int src_flags;
size_t src_size;
int src_orig_color;
as_area_t *src_area, *dst_area;
share_info_t *sh_info;
mem_backend_t *src_backend;
581,19 → 601,6
return ENOENT;
}
#if 0 /* disable the check for now */
#ifdef CONFIG_VIRT_IDX_CACHE
if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
/*
* Refuse to create illegal address alias.
*/
mutex_unlock(&src_area->lock);
mutex_unlock(&src_as->lock);
interrupts_restore(ipl);
return ENOTSUP;
}
#endif /* CONFIG_VIRT_IDX_CACHE */
#endif
 
if (!src_area->backend || !src_area->backend->share) {
/*
610,6 → 617,7
src_flags = src_area->flags;
src_backend = src_area->backend;
src_backend_data = src_area->backend_data;
src_orig_color = src_area->orig_color;
 
/* Share the cacheable flag from the original mapping */
if (src_flags & AS_AREA_CACHEABLE)
664,17 → 672,39
interrupts_restore(ipl);
return ENOMEM;
}
 
/*
* Now the destination address space area has been
* fully initialized. Clear the AS_AREA_ATTR_PARTIAL
* attribute and set the sh_info.
*/
mutex_lock(&dst_as->lock);
mutex_lock(&dst_area->lock);
dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
dst_area->sh_info = sh_info;
dst_area->orig_color = src_orig_color;
#ifdef CONFIG_VIRT_IDX_DCACHE
if (src_orig_color != PAGE_COLOR(dst_base)) {
/*
* We have just detected an attempt to create an invalid address
* alias. We allow this and set a special flag that tells the
* architecture specific code to flush the D-cache when the
* offending address space is installed and deinstalled
* (cleanup).
*
* In order for the flags to take effect immediately, we also
* perform a global D-cache shootdown.
*/
dcache_shootdown_start();
dst_as->dcache_flush_on_install = true;
dst_as->dcache_flush_on_deinstall = true;
dcache_flush();
dcache_shootdown_finalize();
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
mutex_unlock(&dst_area->lock);
mutex_unlock(&dst_as->lock);
 
interrupts_restore(ipl);
return 0;
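
For context: the sparc64 D-cache is virtually indexed but physically tagged, so two mappings of the same frame whose virtual colors differ occupy different cache lines and can hold inconsistent data. A hedged illustration of the hazard the dcache_flush_on_install/deinstall flags guard against; map_frame_at() and the virtual addresses are hypothetical:

/* Illustration only; map_frame_at() is a hypothetical helper that maps the
 * given physical frame at the given virtual address. */
static void alias_hazard_demo(uintptr_t frame)
{
	uint8_t *a = (uint8_t *) map_frame_at(frame, 0x40000000);  /* color 0 */
	uint8_t *b = (uint8_t *) map_frame_at(frame, 0x40002000);  /* color 1 */

	a[0] = 0x2a;
	/*
	 * Without a D-cache flush the store above can stay in the line indexed
	 * by a, while a stale line indexed by b still satisfies this load.
	 */
	(void) b[0];
}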
/trunk/kernel/Makefile
91,8 → 91,8
DEFS += -DCONFIG_NS16550
endif
 
ifeq ($(CONFIG_VIRT_IDX_CACHE),y)
DEFS += -DCONFIG_VIRT_IDX_CACHE
ifeq ($(CONFIG_VIRT_IDX_DCACHE),y)
DEFS += -DCONFIG_VIRT_IDX_DCACHE
endif
 
ifeq ($(CONFIG_POWEROFF),y)
/trunk/kernel/arch/sparc64/include/interrupt.h
43,9 → 43,14
#define IVT_ITEMS 15
#define IVT_FIRST 1
 
/* This needs to be defined for inter-architecture API portability. */
#define VECTOR_TLB_SHOOTDOWN_IPI 0
#define IPI_TLB_SHOOTDOWN VECTOR_TLB_SHOOTDOWN_IPI
 
enum {
IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI,
IPI_DCACHE_SHOOTDOWN
};
 
struct istate {
uint64_t tnpc;
uint64_t tpc;
/trunk/kernel/arch/sparc64/include/mm/as.h
52,7 → 52,7
#ifdef CONFIG_TSB
tsb_entry_t *itsb;
tsb_entry_t *dtsb;
#endif
#endif /* CONFIG_TSB */
} as_arch_t;
 
#ifdef CONFIG_TSB
/trunk/kernel/arch/sparc64/include/mm/cache.h
35,6 → 35,16
#ifndef KERN_sparc64_CACHE_H_
#define KERN_sparc64_CACHE_H_
 
#ifdef CONFIG_SMP
extern void dcache_shootdown_start(void);
extern void dcache_shootdown_finalize(void);
extern void dcache_shootdown_ipi_recv(void);
#else /* CONFIG_SMP */
#define dcache_shootdown_start();
#define dcache_shootdown_finalize();
#define dcache_shootdown_ipi_recv();
#endif /* CONFIG_SMP */
 
extern void dcache_flush(void);
 
#endif
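
Read together with the as_area_share() hunk above, this header implies the following initiator-side sequence; the wrapper below is hypothetical and only restates that sequence:

/* Hypothetical wrapper (assumes CONFIG_VIRT_IDX_DCACHE and <mm/as.h>);
 * it only restates the sequence already used in as_area_share(). */
static void dcache_handle_illegal_alias(as_t *dst_as)
{
	dcache_shootdown_start();        /* park the other processors on dcachelock */
	dst_as->dcache_flush_on_install = true;
	dst_as->dcache_flush_on_deinstall = true;
	dcache_flush();                  /* flush the local D-cache */
	dcache_shootdown_finalize();     /* release them; each then flushes its own D-cache */
}

On uniprocessor builds the three shootdown macros above expand to empty statements, so only the local dcache_flush() remains.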
/trunk/kernel/arch/sparc64/include/cpu.h
55,6 → 55,7
uint32_t mid; /**< Processor ID as read from UPA_CONFIG. */
ver_reg_t ver;
uint32_t clock_frequency; /**< Processor frequency in MHz. */
int dcache_active; /**< When non-zero, the D-cache is not being shot down. */
};
#endif
/trunk/kernel/arch/sparc64/Makefile.inc
84,6 → 84,7
arch/$(ARCH)/src/dummy.s \
arch/$(ARCH)/src/mm/as.c \
arch/$(ARCH)/src/mm/cache.c \
arch/$(ARCH)/src/mm/cache_asm.S \
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c \
/trunk/kernel/arch/sparc64/src/smp/ipi.c
38,6 → 38,7
#include <arch/asm.h>
#include <config.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <arch/interrupt.h>
#include <arch/trap/interrupt.h>
#include <arch/barrier.h>
120,6 → 121,9
case IPI_TLB_SHOOTDOWN:
func = tlb_shootdown_ipi_recv;
break;
case IPI_DCACHE_SHOOTDOWN:
func = dcache_shootdown_ipi_recv;
break;
default:
panic("Unknown IPI (%d).\n", ipi);
break;
/trunk/kernel/arch/sparc64/src/trap/interrupt.c
44,6 → 44,7
#include <print.h>
#include <arch.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <config.h>
#include <synch/spinlock.h>
 
90,6 → 91,8
#ifdef CONFIG_SMP
if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
tlb_shootdown_ipi_recv();
} else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) {
dcache_shootdown_ipi_recv();
}
#endif
} else {
/trunk/kernel/arch/sparc64/src/cpu/cpu.c
92,6 → 92,11
dtlb_insert_mapping((uintptr_t) CPU->stack, KA2PA(CPU->stack), PAGESIZE_8K, true, true);
}
 
/*
* Set the D-cache active flag.
* Needed for the D-cache shootdown protocol to work.
*/
CPU->arch.dcache_active = 1;
}
 
/** Read version information from the current processor. */
/trunk/kernel/arch/sparc64/src/mm/cache.S
File deleted
/trunk/kernel/arch/sparc64/src/mm/tlb.c
111,9 → 111,9
data.pfn = fr.pfn;
data.l = locked;
data.cp = cacheable;
#ifdef CONFIG_VIRT_IDX_CACHE
#ifdef CONFIG_VIRT_IDX_DCACHE
data.cv = cacheable;
#endif /* CONFIG_VIRT_IDX_CACHE */
#endif /* CONFIG_VIRT_IDX_DCACHE */
data.p = true;
data.w = true;
data.g = false;
148,9 → 148,9
data.pfn = fr.pfn;
data.l = false;
data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_CACHE
#ifdef CONFIG_VIRT_IDX_DCACHE
data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_CACHE */
#endif /* CONFIG_VIRT_IDX_DCACHE */
data.p = t->k; /* p like privileged */
data.w = ro ? false : t->w;
data.g = t->g;
184,9 → 184,6
data.pfn = fr.pfn;
data.l = false;
data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_CACHE
data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_CACHE */
data.p = t->k; /* p like privileged */
data.w = false;
data.g = t->g;
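
In the UltraSPARC TTE, cp makes a page cacheable in the physically indexed caches and cv additionally admits it into the virtually indexed D-cache, which is why the hunks above set data.cv only when CONFIG_VIRT_IDX_DCACHE is enabled: without it, pages stay out of the D-cache and virtual aliases cannot form there. A sketch of the pattern (the entry type name is a placeholder for the structure tlb.c actually uses):

/* Sketch of the guarded cacheability assignment used above; tte_data_sketch_t
 * is a placeholder for the TLB entry structure actually used in tlb.c. */
static inline void tte_set_cacheable(tte_data_sketch_t *data, bool cacheable)
{
	data->cp = cacheable;
#ifdef CONFIG_VIRT_IDX_DCACHE
	data->cv = cacheable;       /* aliasing is handled, allow the D-cache */
#else
	data->cv = false;           /* keep the page out of the virtually indexed D-cache */
#endif
}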
/trunk/kernel/arch/sparc64/src/mm/as.c
47,8 → 47,12
#include <mm/frame.h>
#include <bitops.h>
#include <macros.h>
#endif
#endif /* CONFIG_TSB */
 
#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/** Architecture dependent address space init. */
void as_arch_init(void)
{
158,6 → 162,23
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
dtsb_base_write(tsb_base.value);
#endif
#ifdef CONFIG_VIRT_IDX_DCACHE
if (as->dcache_flush_on_install) {
/*
* Some mappings in this address space are illegal address
* aliases. Upon their creation, the dcache_flush_on_install
* flag was set.
*
* We are now obliged to flush the D-cache in order to guarantee
* that there will be at most one cache line for each address
* alias.
*
* This flush performs a cleanup after another address space in
* which the alias might have existed.
*/
dcache_flush();
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
}
 
/** Perform sparc64-specific tasks when an address space is removed from the processor.
192,6 → 213,26
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
}
#endif
#ifdef CONFIG_VIRT_IDX_DCACHE
if (as->dcache_flush_on_deinstall) {
/*
* Some mappings in this address space are illegal address
* aliases. Upon their creation, the dcache_flush_on_deinstall
* flag was set.
*
* We are now obliged to flush the D-cache in order to guarantee
* that there will be at most one cache line for each address
* alias.
*
* This flush performs a cleanup after this address space. It is
* necessary because other address spaces that contain the same
* alias are not necessarily aware of the need to carry out the
* cache flush. The only address spaces that are aware of it are
* those that created the illegal alias.
*/
dcache_flush();
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
}
 
/** @}
/trunk/kernel/arch/sparc64/src/mm/cache.c
0,0 → 1,98
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64mm
* @{
*/
/**
* @file
* @brief D-cache shootdown algorithm.
*/
 
#include <arch/mm/cache.h>
 
#ifdef CONFIG_SMP
 
#include <smp/ipi.h>
#include <arch/interrupt.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <debug.h>
 
/**
* This spinlock is used by the processors to synchronize during the D-cache
* shootdown.
*/
SPINLOCK_INITIALIZE(dcachelock);
 
/** Initialize the D-cache shootdown sequence.
*
* Start the shootdown sequence by sending out an IPI and wait until all
* processors spin on the dcachelock spinlock.
*/
void dcache_shootdown_start(void)
{
int i;
 
CPU->arch.dcache_active = 0;
spinlock_lock(&dcachelock);
 
ipi_broadcast(IPI_DCACHE_SHOOTDOWN);
 
busy_wait:
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].arch.dcache_active)
goto busy_wait;
}
 
/** Finish the D-cache shootdown sequence. */
void dcache_shootdown_finalize(void)
{
spinlock_unlock(&dcachelock);
CPU->arch.dcache_active = 1;
}
 
/** Process the D-cache shootdown IPI. */
void dcache_shootdown_ipi_recv(void)
{
ASSERT(CPU);
 
CPU->arch.dcache_active = 0;
spinlock_lock(&dcachelock);
spinlock_unlock(&dcachelock);
dcache_flush();
 
CPU->arch.dcache_active = 1;
}
 
#endif /* CONFIG_SMP */
 
/** @}
*/
 
/trunk/kernel/arch/sparc64/src/mm/cache_asm.S
0,0 → 1,44
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/arch.h>
 
#define DCACHE_SIZE (16*1024)
#define DCACHE_LINE_SIZE 64
 
.global dcache_flush
dcache_flush:
set (DCACHE_SIZE - DCACHE_LINE_SIZE), %g1
stxa %g0, [%g1] ASI_DCACHE_TAG
0: membar #Sync
subcc %g1, DCACHE_LINE_SIZE, %g1
bnz,pt %xcc, 0b
stxa %g0, [%g1] ASI_DCACHE_TAG
retl
membar #Sync
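
A C-level rendering of the flush loop above, for illustration only; asi_u64_write() is assumed to be the kernel's ASI store accessor, and the real routine remains the assembly, which keeps a store in each branch delay slot:

/* C-level sketch of dcache_flush. asi_u64_write() is an assumed ASI store
 * accessor; writing zero to a line's tag through ASI_DCACHE_TAG invalidates
 * that line. The geometry comes from the defines in this file. */
static void dcache_flush_sketch(void)
{
	uintptr_t tag = DCACHE_SIZE - DCACHE_LINE_SIZE;

	do {
		asi_u64_write(ASI_DCACHE_TAG, tag, 0);   /* invalidate one line */
		asm volatile ("membar #Sync\n");
	} while ((tag -= DCACHE_LINE_SIZE) != 0);

	asi_u64_write(ASI_DCACHE_TAG, 0, 0);             /* and the line at offset 0 */
	asm volatile ("membar #Sync\n");
}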
 
/trunk/kernel/arch/sparc64/src/mm/tsb.c
100,9 → 100,6
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_CACHE
tsb->data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_CACHE */
tsb->data.p = t->k; /* p as privileged */
tsb->data.v = t->p;
142,9 → 139,9
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_CACHE
#ifdef CONFIG_VIRT_IDX_DCACHE
tsb->data.cv = t->c;
#endif /* CONFIG_VIRT_IDX_CACHE */
#endif /* CONFIG_VIRT_IDX_DCACHE */
tsb->data.p = t->k; /* p as privileged */
tsb->data.w = ro ? false : t->w;
tsb->data.v = t->p;
/trunk/kernel/arch/sparc64/src/start.S
122,11 → 122,11
stxa %g1, [VA_DMMU_TAG_ACCESS] %asi
membar #Sync
 
#ifdef CONFIG_VIRT_IDX_CACHE
#ifdef CONFIG_VIRT_IDX_DCACHE
#define TTE_LOW_DATA(imm) (TTE_CP | TTE_CV | TTE_P | LMA | (imm))
#else /* CONFIG_VIRT_IDX_CACHE */
#else /* CONFIG_VIRT_IDX_DCACHE */
#define TTE_LOW_DATA(imm) (TTE_CP | TTE_P | LMA | (imm))
#endif /* CONFIG_VIRT_IDX_CACHE */
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
#define SET_TLB_DATA(r1, r2, imm) \
set TTE_LOW_DATA(imm), %r1; \
360,8 → 360,8
*/
.global kernel_8k_tlb_data_template
kernel_8k_tlb_data_template:
#ifdef CONFIG_VIRT_IDX_CACHE
#ifdef CONFIG_VIRT_IDX_DCACHE
.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | TTE_CV | TTE_P | TTE_W)
#else /* CONFIG_VIRT_IDX_CACHE */
#else /* CONFIG_VIRT_IDX_DCACHE */
.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | TTE_P | TTE_W)
#endif /* CONFIG_VIRT_IDX_CACHE */
#endif /* CONFIG_VIRT_IDX_DCACHE */