HelenOS Subversion Repository

Compare Revisions: Rev 2133 → Rev 2134

/trunk/kernel/generic/src/mm/backend_anon.c
78,6 → 78,7
int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
uintptr_t frame;
bool dirty = false;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
94,7 → 95,7
*/
mutex_lock(&area->sh_info->lock);
frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
if (!frame) {
bool allocate = true;
int i;
113,6 → 114,7
if (allocate) {
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
/*
* Insert the address of the newly allocated
143,6 → 145,7
*/
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
}
/*
154,6 → 157,21
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
#ifdef CONFIG_VIRT_IDX_DCACHE
if (dirty && PAGE_COLOR(PA2KA(frame)) != PAGE_COLOR(addr)) {
/*
* By writing to the frame using kernel virtual address,
* we have created an illegal virtual alias. We now have to
* invalidate cachelines belonging to addr on all processors
* so that they will be reloaded with the new content on next
* read.
*/
dcache_flush_frame(addr, frame);
dcache_shootdown_start(DCACHE_INVL_FRAME, PAGE_COLOR(addr), frame);
dcache_shootdown_finalize();
}
#endif
 
return AS_PF_OK;
}
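
The comment added above (and mirrored in backend_elf.c below) hinges on page colors in a virtually indexed D-cache: the kernel initializes the frame through its PA2KA alias, and if that alias falls into a different cache color than the faulting user address, the user mapping would see stale lines. A minimal standalone sketch of the color arithmetic, assuming an UltraSPARC-style 16 KiB virtually indexed D-cache and 8 KiB pages (one color bit); the PAGE_COLOR definition and the two addresses are illustrative assumptions, not taken from the HelenOS tree:

/* Illustrative only: page-color arithmetic for a virtually indexed,
 * physically tagged D-cache. Assumes a 16 KiB D-cache and 8 KiB pages,
 * i.e. one color bit; constants and addresses are made up for the demo. */
#include <stdio.h>

#define PAGE_SIZE       8192UL          /* assumed page size */
#define DCACHE_SIZE     16384UL         /* assumed D-cache size */
#define PAGE_COLOR(va)  (((va) / PAGE_SIZE) % (DCACHE_SIZE / PAGE_SIZE))

int main(void)
{
        /* Hypothetical fault address and kernel (PA2KA-style) alias of the
         * newly allocated frame. */
        unsigned long long user_addr = 0x0000000000804000ULL;
        unsigned long long kernel_addr = 0xfffff80000052000ULL;

        printf("user color = %llu, kernel color = %llu\n",
            PAGE_COLOR(user_addr), PAGE_COLOR(kernel_addr));

        if (PAGE_COLOR(user_addr) != PAGE_COLOR(kernel_addr))
                printf("colors differ: writing through the kernel alias left "
                    "stale lines under the user color, so they must be "
                    "invalidated before the page is mapped\n");
        return 0;
}

When the two colors match, the kernel write already landed in the cache index the user mapping will use, which is why the shootdown above is guarded by both the dirty flag and the color comparison.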
 
168,9 → 186,6
void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
 
/** Share the anonymous address space area.
217,7 → 232,7
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
frame_reference_add(pfn);
}
 
}
}
mutex_unlock(&area->sh_info->lock);
/trunk/kernel/generic/src/mm/backend_elf.c
81,6 → 81,7
btree_node_t *leaf;
uintptr_t base, frame;
index_t i;
bool dirty = false;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
147,7 → 148,8
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memcpy((void *) PA2KA(frame),
(void *) (base + i * FRAME_SIZE), FRAME_SIZE);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap,
168,6 → 170,7
*/
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
188,6 → 191,7
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE),
size);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
205,6 → 209,21
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (dirty && PAGE_COLOR(PA2KA(frame)) != PAGE_COLOR(addr)) {
/*
* By writing to the frame using kernel virtual address,
* we have created an illegal virtual alias. We now have to
* invalidate cachelines belonging to addr on all processors
* so that they will be reloaded with the new content on next
* read.
*/
dcache_flush_frame(addr, frame);
dcache_shootdown_start(DCACHE_INVL_FRAME, PAGE_COLOR(addr), frame);
dcache_shootdown_finalize();
}
#endif
 
return AS_PF_OK;
}
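
Besides the aliasing fix, the hunks above also show how the last, partially backed frame of an ELF segment is initialized: only the first size bytes come from the image and the remainder must read as zeros. A small userspace sketch of that zero-tail-and-copy step, assuming an 8 KiB frame; the buffers and values are made up for the demo and are not HelenOS code:

/* Illustrative only: initializing a frame that is only partially backed
 * by the ELF image: zero-fill the tail, copy the valid prefix. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FRAME_SIZE 8192

int main(void)
{
        size_t size = 1300;                     /* bytes present in the image */
        unsigned char *image = malloc(size);    /* stand-in for the mapped ELF segment */
        unsigned char *frame = malloc(FRAME_SIZE);  /* stand-in for PA2KA(frame) */

        if (image == NULL || frame == NULL)
                return 1;
        memset(image, 0xAB, size);

        /* Same two steps as in the fault handler: zero the tail beyond the
         * initialized part, then copy the initialized prefix. */
        memset(frame + size, 0, FRAME_SIZE - size);
        memcpy(frame, image, size);

        printf("frame[%zu] = 0x%02x, frame[%zu] = 0x%02x\n",
            size - 1, frame[size - 1], size, frame[size]);

        free(image);
        free(frame);
        return 0;
}

The two memory operations touch disjoint ranges of the frame, so their order does not matter; the fault handler zeroes the tail first and then copies the prefix.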
 
238,9 → 257,6
* data.
*/
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
} else {
/*
247,11 → 263,8
* The frame is either anonymous memory or the mixed case (i.e.
* lower part is backed by the ELF image and the upper is
* anonymous). In any case, a frame needs to be freed.
*/
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
*/
frame_free(frame);
}
}
 
/trunk/kernel/arch/sparc64/include/interrupt.h
46,7 → 46,8
#define VECTOR_TLB_SHOOTDOWN_IPI 0
 
enum {
IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI
IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI,
IPI_DCACHE_SHOOTDOWN
};
 
typedef struct {
/trunk/kernel/arch/sparc64/include/mm/cache.h
43,10 → 43,44
#define dcache_flush_frame(p, f) \
dcache_flush_tag(PAGE_COLOR((p)), ADDR2PFN((f)));
 
/**
* Enumerations to differentiate among different scopes of D-Cache
* invalidation.
*/
typedef enum {
DCACHE_INVL_INVALID,
DCACHE_INVL_ALL,
DCACHE_INVL_COLOR,
DCACHE_INVL_FRAME
} dcache_invalidate_type_t;
 
/**
* Number of messages that can be queued in the cpu_arch_t structure at a time.
*/
#define DCACHE_MSG_QUEUE_LEN 10
 
/** D-cache shootdown message type. */
typedef struct {
dcache_invalidate_type_t type;
int color;
uintptr_t frame;
} dcache_shootdown_msg_t;
 
extern void dcache_flush(void);
extern void dcache_flush_color(int c);
extern void dcache_flush_tag(int c, pfn_t tag);
 
#ifdef CONFIG_SMP
extern void dcache_shootdown_start(dcache_invalidate_type_t type, int color,
uintptr_t frame);
extern void dcache_shootdown_finalize(void);
extern void dcache_shootdown_ipi_recv(void);
#else
#define dcache_shootdown_start(t, c, f)
#define dcache_shootdown_finalize()
#define dcache_shootdown_ipi_recv()
#endif /* CONFIG_SMP */
 
#endif
 
/** @}
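
The message queue declared here is bounded by DCACHE_MSG_QUEUE_LEN, so a producer that finds it full has to degrade gracefully rather than drop a request. A standalone sketch of that overflow policy (collapse all pending work into a single DCACHE_INVL_ALL message), mirroring the logic used later in dcache_shootdown_start(); the queue wrapper type and the main() driver are assumptions made for the demo:

/* Illustrative only: bounded per-CPU message queue with the
 * collapse-to-full-flush overflow policy. Simplified, non-kernel types. */
#include <stdint.h>
#include <stdio.h>

#define DCACHE_MSG_QUEUE_LEN 10

typedef enum {
        DCACHE_INVL_INVALID,
        DCACHE_INVL_ALL,
        DCACHE_INVL_COLOR,
        DCACHE_INVL_FRAME
} dcache_invalidate_type_t;

typedef struct {
        dcache_invalidate_type_t type;
        int color;
        uintptr_t frame;
} dcache_shootdown_msg_t;

typedef struct {
        dcache_shootdown_msg_t messages[DCACHE_MSG_QUEUE_LEN];
        unsigned count;
} dcache_queue_t;

static void enqueue(dcache_queue_t *q, dcache_invalidate_type_t type,
    int color, uintptr_t frame)
{
        if (q->count == DCACHE_MSG_QUEUE_LEN) {
                /* Queue full: one DCACHE_INVL_ALL subsumes everything. */
                q->count = 1;
                q->messages[0].type = DCACHE_INVL_ALL;
                q->messages[0].color = 0;
                q->messages[0].frame = 0;
        } else {
                q->messages[q->count].type = type;
                q->messages[q->count].color = color;
                q->messages[q->count].frame = frame;
                q->count++;
        }
}

int main(void)
{
        dcache_queue_t q = { .count = 0 };

        /* Post more messages than fit to trigger the collapse. */
        for (int i = 0; i < DCACHE_MSG_QUEUE_LEN + 3; i++)
                enqueue(&q, DCACHE_INVL_FRAME, i & 1, 0x2000UL * i);

        printf("count = %u, type[0] = %d\n", q.count, q.messages[0].type);
        return 0;
}

Collapsing to DCACHE_INVL_ALL is safe because a full flush subsumes every finer-grained invalidation that was queued before it.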
/trunk/kernel/arch/sparc64/include/cpu.h
39,6 → 39,10
#include <arch/register.h>
#include <arch/asm.h>
 
#ifdef CONFIG_SMP
#include <arch/mm/cache.h>
#endif
 
#define MANUF_FUJITSU 0x04
#define MANUF_ULTRASPARC 0x17 /**< UltraSPARC I, UltraSPARC II */
#define MANUF_SUN 0x3e
53,12 → 57,18
#define IMPL_SPARC64V 0x5
 
typedef struct {
uint32_t mid; /**< Processor ID as read from UPA_CONFIG. */
uint32_t mid; /**< Processor ID as read from
UPA_CONFIG. */
ver_reg_t ver;
uint32_t clock_frequency; /**< Processor frequency in Hz. */
uint64_t next_tick_cmpr; /**< Next clock interrupt should be
generated when the TICK register
matches this value. */
generated when the TICK register
matches this value. */
#ifdef CONFIG_SMP
int dcache_active;
dcache_shootdown_msg_t dcache_messages[DCACHE_MSG_QUEUE_LEN];
count_t dcache_message_count;
#endif
} cpu_arch_t;
#endif
/trunk/kernel/arch/sparc64/src/smp/ipi.c
39,6 → 39,7
#include <arch/asm.h>
#include <config.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <arch/interrupt.h>
#include <arch/trap/interrupt.h>
#include <arch/barrier.h>
78,9 → 79,9
func);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_1, 0);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_2, 0);
asi_u64_write(ASI_UDB_INTR_W, (mid <<
INTR_VEC_DISPATCH_MID_SHIFT) | ASI_UDB_INTR_W_DISPATCH,
0);
asi_u64_write(ASI_UDB_INTR_W,
(mid << INTR_VEC_DISPATCH_MID_SHIFT) |
ASI_UDB_INTR_W_DISPATCH, 0);
membar();
124,6 → 125,11
case IPI_TLB_SHOOTDOWN:
func = tlb_shootdown_ipi_recv;
break;
#if (defined(CONFIG_SMP) && (defined(CONFIG_VIRT_IDX_DCACHE)))
case IPI_DCACHE_SHOOTDOWN:
func = dcache_shootdown_ipi_recv;
break;
#endif
default:
panic("Unknown IPI (%d).\n", ipi);
break;
/trunk/kernel/arch/sparc64/src/trap/interrupt.c
44,6 → 44,7
#include <print.h>
#include <arch.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <config.h>
#include <synch/spinlock.h>
 
83,13 → 84,17
} else if (data0 > config.base) {
/*
* This is a cross-call.
* data0 contains address of kernel function.
* data0 contains address of the kernel function.
* We call the function only after we verify
* it is on of the supported ones.
* it is one of the supported ones.
*/
#ifdef CONFIG_SMP
if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
tlb_shootdown_ipi_recv();
#ifdef CONFIG_VIRT_IDX_DCACHE
} else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) {
dcache_shootdown_ipi_recv();
#endif
}
#endif
} else {
/trunk/kernel/arch/sparc64/src/cpu/cpu.c
51,6 → 51,11
upa_config.value = upa_config_read();
CPU->arch.mid = upa_config.mid;
#if (defined(CONFIG_SMP) && defined(CONFIG_VIRT_IDX_DCACHE))
CPU->arch.dcache_active = 1;
CPU->arch.dcache_message_count = 0;
#endif
 
/*
* Detect processor frequency.
*/
/trunk/kernel/arch/sparc64/src/mm/tlb.c
482,10 → 482,10
mmu_primary_context_write(ctx.v);
for (i = 0; i < cnt; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
PAGE_SIZE);
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * PAGE_SIZE);
}
mmu_primary_context_write(pc_save.v);
/trunk/kernel/arch/sparc64/src/mm/cache.c
31,10 → 31,127
*/
/**
* @file
* @brief D-cache shootdown algorithm.
*/
 
#include <arch/mm/cache.h>
 
#ifdef CONFIG_SMP
#ifdef CONFIG_VIRT_IDX_DCACHE
 
#include <smp/ipi.h>
#include <arch/interrupt.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <debug.h>
 
/**
* This spinlock is used by the processors to synchronize during the D-cache
* shootdown.
*/
SPINLOCK_INITIALIZE(dcachelock);
 
/** Initialize the D-cache shootdown sequence.
*
* Start the shootdown sequence by sending out an IPI and wait until all
* processors spin on the dcachelock spinlock.
*
* @param type Scope of the D-cache shootdown.
* @param color Color to be invalidated; applicable only for DCACHE_INVL_COLOR
* and DCACHE_INVL_FRAME invalidation types.
* @param frame Frame to be invalidated; applicable only for DCACHE_INVL_FRAME
* invalidation types.
*/
void dcache_shootdown_start(dcache_invalidate_type_t type, int color,
uintptr_t frame)
{
int i;
 
CPU->arch.dcache_active = 0;
spinlock_lock(&dcachelock);
 
for (i = 0; i < config.cpu_count; i++) {
cpu_t *cpu;
 
if (i == CPU->id)
continue;
 
cpu = &cpus[i];
spinlock_lock(&cpu->lock);
if (cpu->arch.dcache_message_count ==
DCACHE_MSG_QUEUE_LEN) {
/*
* The queue is full, flush the cache entirely.
*/
cpu->arch.dcache_message_count = 1;
cpu->arch.dcache_messages[0].type = DCACHE_INVL_ALL;
cpu->arch.dcache_messages[0].color = 0; /* ignored */
cpu->arch.dcache_messages[0].frame = 0; /* ignored */
} else {
index_t idx = cpu->arch.dcache_message_count++;
cpu->arch.dcache_messages[idx].type = type;
cpu->arch.dcache_messages[idx].color = color;
cpu->arch.dcache_messages[idx].frame = frame;
}
spinlock_unlock(&cpu->lock);
}
 
ipi_broadcast(IPI_DCACHE_SHOOTDOWN);
 
busy_wait:
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].arch.dcache_active)
goto busy_wait;
}
 
/** Finish the D-cache shootdown sequence. */
void dcache_shootdown_finalize(void)
{
spinlock_unlock(&dcachelock);
CPU->arch.dcache_active = 1;
}
 
/** Process the D-cache shootdown IPI. */
void dcache_shootdown_ipi_recv(void)
{
int i;
 
ASSERT(CPU);
 
CPU->arch.dcache_active = 0;
spinlock_lock(&dcachelock);
spinlock_unlock(&dcachelock);
spinlock_lock(&CPU->lock);
ASSERT(CPU->arch.dcache_message_count < DCACHE_MSG_QUEUE_LEN);
for (i = 0; i < CPU->arch.dcache_message_count; i++) {
switch (CPU->arch.dcache_messages[i].type) {
case DCACHE_INVL_ALL:
dcache_flush();
goto flushed;
break;
case DCACHE_INVL_COLOR:
dcache_flush_color(CPU->arch.dcache_messages[i].color);
break;
case DCACHE_INVL_FRAME:
dcache_flush_frame(CPU->arch.dcache_messages[i].color,
CPU->arch.dcache_messages[i].frame);
break;
default:
panic("unknown type (%d)\n",
CPU->arch.dcache_messages[i].type);
}
}
flushed:
CPU->arch.dcache_message_count = 0;
spinlock_unlock(&CPU->lock);
 
CPU->arch.dcache_active = 1;
}
 
#endif /* CONFIG_VIRT_IDX_DCACHE */
#endif /* CONFIG_SMP */
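
Taken together, dcache_shootdown_start(), dcache_shootdown_finalize() and dcache_shootdown_ipi_recv() implement a simple rendezvous: the initiator parks every other processor on dcachelock, performs its own flush, and only then lets the receivers drain their queues. A userspace sketch of that handshake using POSIX threads and C11 atomics in place of IPIs and kernel spinlocks; spawning the receiver threads stands in for the IPI broadcast, and all names below are illustrative assumptions rather than HelenOS code:

/* Illustrative only: the start/finalize rendezvous of the D-cache
 * shootdown, modelled with pthreads. Thread creation stands in for
 * ipi_broadcast(IPI_DCACHE_SHOOTDOWN). */
#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define NCPUS 4

static pthread_mutex_t dcachelock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int dcache_active[NCPUS];
static pthread_t receivers[NCPUS];

/* Corresponds to dcache_shootdown_ipi_recv(): park on dcachelock until the
 * initiator finalizes, then drain the (here imaginary) message queue. */
static void *receiver(void *arg)
{
        int self = (int) (intptr_t) arg;

        atomic_store(&dcache_active[self], 0);
        pthread_mutex_lock(&dcachelock);
        pthread_mutex_unlock(&dcachelock);
        printf("cpu%d: draining D-cache message queue\n", self);
        atomic_store(&dcache_active[self], 1);
        return NULL;
}

/* Corresponds to dcache_shootdown_start(): take the lock, "send the IPI"
 * and busy-wait until every processor has parked. */
static void shootdown_start(int self)
{
        atomic_store(&dcache_active[self], 0);
        pthread_mutex_lock(&dcachelock);

        for (int i = 0; i < NCPUS; i++)
                if (i != self)
                        pthread_create(&receivers[i], NULL, receiver,
                            (void *) (intptr_t) i);

        for (int i = 0; i < NCPUS; i++)
                while (atomic_load(&dcache_active[i]))
                        ;       /* busy-wait, as in the kernel */
}

/* Corresponds to dcache_shootdown_finalize(): release the parked receivers. */
static void shootdown_finalize(int self)
{
        pthread_mutex_unlock(&dcachelock);
        atomic_store(&dcache_active[self], 1);
}

int main(void)
{
        for (int i = 0; i < NCPUS; i++)
                atomic_store(&dcache_active[i], 1);

        shootdown_start(0);
        printf("cpu0: flushing its own D-cache lines\n");
        shootdown_finalize(0);

        for (int i = 1; i < NCPUS; i++)
                pthread_join(receivers[i], NULL);
        return 0;
}

A receiver clears its dcache_active flag before it tries to take dcachelock, so by the time the initiator sees every flag cleared, each receiver is committed to blocking behind the lock the initiator already holds and cannot run ahead of the finalize step.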
 
/** @}
*/