/trunk/kernel/arch/sparc64/src/smp/ipi.c
---
38,7 → 38,6

#include <arch/asm.h>
#include <config.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <arch/interrupt.h>
#include <arch/trap/interrupt.h>
#include <arch/barrier.h>

121,9 → 120,6

    case IPI_TLB_SHOOTDOWN:
        func = tlb_shootdown_ipi_recv;
        break;
    case IPI_DCACHE_SHOOTDOWN:
        func = dcache_shootdown_ipi_recv;
        break;
    default:
        panic("Unknown IPI (%d).\n", ipi);
        break;
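
The case removed above sat in a dispatch that maps an IPI number to a handler function and then ships the handler's address to the other CPUs; the receiving side (the interrupt.c hunk below) compares the cross-call's data0 word against the known handler addresses. A hedged sketch of that shape, where cross_call_broadcast() is a hypothetical placeholder for the actual delivery primitive, not code from this changeset:

```c
/* Hedged sketch, not the actual HelenOS routine.  cross_call_broadcast()
 * stands in for whatever primitive delivers data0 to the other CPUs. */
extern void cross_call_broadcast(uintptr_t data0);    /* hypothetical */

static void ipi_broadcast_sketch(int ipi)
{
    void (*func)(void);

    switch (ipi) {
    case IPI_TLB_SHOOTDOWN:
        func = tlb_shootdown_ipi_recv;
        break;
    default:
        panic("Unknown IPI (%d).\n", ipi);
        break;
    }

    /* The receiver compares data0 against known handler addresses. */
    cross_call_broadcast((uintptr_t) func);
}
```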

/trunk/kernel/arch/sparc64/src/trap/interrupt.c
---
44,7 → 44,6

#include <print.h>
#include <arch.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <config.h>
#include <synch/spinlock.h>

91,8 → 90,6

#ifdef CONFIG_SMP
        if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
            tlb_shootdown_ipi_recv();
        } else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) {
            dcache_shootdown_ipi_recv();
        }
#endif
    } else {

/trunk/kernel/arch/sparc64/src/mm/as.c
---
49,10 → 49,6

#include <macros.h>
#endif /* CONFIG_TSB */
#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */

/** Architecture dependent address space init. */
void as_arch_init(void)
{

162,23 → 158,6

    tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
    dtsb_base_write(tsb_base.value);
#endif
#ifdef CONFIG_VIRT_IDX_DCACHE
    if (as->dcache_flush_on_install) {
        /*
         * Some mappings in this address space are illegal address
         * aliases. Upon their creation, the dcache_flush_on_install
         * flag was set.
         *
         * We are now obliged to flush the D-cache in order to guarantee
         * that there will be at most one cache line for each address
         * alias.
         *
         * This flush performs a cleanup after another address space in
         * which the alias might have existed.
         */
        dcache_flush();
    }
#endif /* CONFIG_VIRT_IDX_DCACHE */
}
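
The flush above exists because the UltraSPARC D-cache is virtually indexed and physically tagged, so two virtual aliases of one physical page can occupy two distinct cache lines when they differ in the index bits above the page offset. A minimal illustration of that "page color" arithmetic, assuming the usual 16 KiB direct-mapped D-cache and 8 KiB base pages (the constants are assumptions about UltraSPARC I/II, not taken from this changeset):

```c
#include <stdint.h>

/* Assumed geometry: 16 KiB direct-mapped, virtually indexed D-cache and
 * 8 KiB pages.  With these numbers there is exactly one index bit above
 * the page offset (VA bit 13), so aliases whose colors differ index two
 * different cache lines for the same physical data. */
#define DCACHE_SIZE    (16 * 1024)
#define PAGE_SIZE_8K   (8 * 1024)

static inline unsigned page_color(uintptr_t va)
{
    return (unsigned) ((va % DCACHE_SIZE) / PAGE_SIZE_8K);  /* 0 or 1 */
}
```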

/** Perform sparc64-specific tasks when an address space is removed from the processor.

213,26 → 192,6

        dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
    }
#endif
#ifdef CONFIG_VIRT_IDX_DCACHE
    if (as->dcache_flush_on_deinstall) {
        /*
         * Some mappings in this address space are illegal address
         * aliases. Upon their creation, the dcache_flush_on_deinstall
         * flag was set.
         *
         * We are now obliged to flush the D-cache in order to guarantee
         * that there will be at most one cache line for each address
         * alias.
         *
         * This flush performs a cleanup after this address space. It is
         * necessary because other address spaces that contain the same
         * alias are not necessarily aware of the need to carry out the
         * cache flush. The only address spaces that are aware of it are
         * those that created the illegal alias.
         */
        dcache_flush();
    }
#endif /* CONFIG_VIRT_IDX_DCACHE */
}
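
Both removed branches end in dcache_flush(). On UltraSPARC that operation is typically implemented by zeroing every D-cache tag through a diagnostic ASI; a hedged sketch of the idea (the ASI number and cache geometry are assumptions about UltraSPARC I/II, not code taken from this changeset):

```c
#include <stdint.h>

#define ASI_DCACHE_TAG      0x47         /* assumed UltraSPARC diagnostic ASI */
#define DCACHE_SIZE         (16 * 1024)  /* assumed geometry */
#define DCACHE_LINE_SIZE    32

static inline void dcache_flush_sketch(void)
{
    uintptr_t i;

    /* Invalidate each line by writing a zero tag, then synchronize. */
    for (i = 0; i < DCACHE_SIZE; i += DCACHE_LINE_SIZE)
        asm volatile ("stxa %%g0, [%0] %1\n"
            :: "r" (i), "i" (ASI_DCACHE_TAG) : "memory");
    asm volatile ("membar #Sync\n" ::: "memory");
}
```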
/** @}

/trunk/kernel/arch/sparc64/src/mm/cache.c
---
31,68 → 31,10

 */

/**
 * @file
 * @brief D-cache shootdown algorithm.
 */

#include <arch/mm/cache.h>

#ifdef CONFIG_SMP
#include <smp/ipi.h>
#include <arch/interrupt.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <debug.h>

/**
 * This spinlock is used by the processors to synchronize during the D-cache
 * shootdown.
 */
SPINLOCK_INITIALIZE(dcachelock);

/** Initialize the D-cache shootdown sequence.
 *
 * Start the shootdown sequence by sending out an IPI and wait until all
 * processors spin on the dcachelock spinlock.
 */
void dcache_shootdown_start(void)
{
    int i;

    CPU->arch.dcache_active = 0;
    spinlock_lock(&dcachelock);
    ipi_broadcast(IPI_DCACHE_SHOOTDOWN);

busy_wait:
    for (i = 0; i < config.cpu_count; i++)
        if (cpus[i].arch.dcache_active)
            goto busy_wait;
}

/** Finish the D-cache shootdown sequence. */
void dcache_shootdown_finalize(void)
{
    spinlock_unlock(&dcachelock);
    CPU->arch.dcache_active = 1;
}

/** Process the D-cache shootdown IPI. */
void dcache_shootdown_ipi_recv(void)
{
    ASSERT(CPU);

    CPU->arch.dcache_active = 0;
    spinlock_lock(&dcachelock);
    spinlock_unlock(&dcachelock);
    dcache_flush();
    CPU->arch.dcache_active = 1;
}
#endif /* CONFIG_SMP */

/** @}
 */
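
The three removed routines implement the shootdown protocol: the initiator parks every other CPU on dcachelock, does its work, and the parked CPUs flush their own D-caches once released. A hedged sketch of the intended call sequence on the initiating CPU, where create_illegal_alias() is a hypothetical placeholder for whatever mapping change creates the alias:

```c
/* Sketch only: how the removed API was meant to be driven. */
extern void create_illegal_alias(void);    /* hypothetical */

void make_alias_example(void)
{
    dcache_shootdown_start();      /* IPI all CPUs; wait until each one has
                                    * cleared dcache_active and is spinning
                                    * on dcachelock */
    create_illegal_alias();        /* hypothetical: the operation that must
                                    * not coexist with stale D-cache lines */
    dcache_flush();                /* flush the local D-cache */
    dcache_shootdown_finalize();   /* release dcachelock; each other CPU then
                                    * flushes its own D-cache in
                                    * dcache_shootdown_ipi_recv() */
}
```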

/trunk/kernel/arch/sparc64/src/mm/page.c
---
73,8 → 73,9

     */
    for (i = 0; i < bsp_locked_dtlb_entries; i++) {
        dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
            bsp_locked_dtlb_entry[i].phys_page, bsp_locked_dtlb_entry[i].pagesize_code,
            true, false);
            bsp_locked_dtlb_entry[i].phys_page,
            bsp_locked_dtlb_entry[i].pagesize_code, true,
            false);
    }
#endif

151,9 → 152,12

        /*
         * Second, save the information about the mapping for APs.
         */
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page = virtaddr + i*sizemap[order].increment;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page = physaddr + i*sizemap[order].increment;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code = sizemap[order].pagesize_code;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
            virtaddr + i*sizemap[order].increment;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
            physaddr + i*sizemap[order].increment;
        bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
            sizemap[order].pagesize_code;
        bsp_locked_dtlb_entries++;
#endif
    }
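
Both page.c hunks only re-wrap long lines; the bookkeeping they touch records every locked DTLB entry created by the BSP so that application processors can replay the same mappings (the loop in the first hunk). The fields visible in the diff suggest a structure along these lines; the type name and array bound below are assumptions for illustration:

```c
#include <stddef.h>
#include <stdint.h>

/* Assumed shape of the BSP's locked-DTLB bookkeeping; the field names are
 * taken from the diff, everything else is illustrative. */
typedef struct {
    uintptr_t virt_page;          /* virtual page locked by the BSP */
    uintptr_t phys_page;          /* backing physical page */
    unsigned int pagesize_code;   /* TLB page-size encoding */
} bsp_locked_dtlb_entry_t;

#define BSP_LOCKED_DTLB_MAX    16    /* assumed capacity */

static bsp_locked_dtlb_entry_t bsp_locked_dtlb_entry[BSP_LOCKED_DTLB_MAX];
static size_t bsp_locked_dtlb_entries;
```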