Subversion Repositories HelenOS

Compare Revisions

Rev 1889 → Rev 1890

/trunk/kernel/generic/include/mm/as.h
160,6 → 160,8
 
/** Data to be used by the backend. */
mem_backend_data_t backend_data;
as_arch_t arch;
};
 
extern as_t *AS_KERNEL;
192,6 → 194,9
#ifndef as_install_arch
extern void as_install_arch(as_t *as);
#endif /* !def as_install_arch */
#ifndef as_deinstall_arch
extern void as_deinstall_arch(as_t *as);
#endif /* !def as_deinstall_arch */
 
/* Backend declarations. */
extern mem_backend_t anon_backend;
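
The #ifndef guards above implement an override pattern: an architecture either stubs the hook out with an empty macro in its own mm/as.h (as the xen32, ia32, amd64, ppc32, ppc64, mips32 and ia64 headers do later in this revision) or leaves the name undefined, in which case the generic header declares it extern and the architecture supplies a real function (as sparc64 does). Condensed, the two options are:

/* Option A - the arch header compiles the hook away: */
#define as_deinstall_arch(as)

/* Option B - the arch leaves the macro undefined and implements it: */
void as_deinstall_arch(as_t *as)
{
	/* arch-specific teardown, e.g. demapping TSBs on sparc64 */
}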
/trunk/kernel/generic/src/mm/as.c
84,6 → 84,11
*/
as_operations_t *as_operations = NULL;
 
/**
* Slab for as_t objects.
*/
static slab_cache_t *as_slab;
 
/** This lock protects inactive_as_with_asid_head list. It must be acquired before as_t mutex. */
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
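
A minimal sketch of the ordering contract stated in the comment above; real call sites in as.c also manage interrupt priority with interrupts_disable()/interrupts_restore() and may use trylock-style variants, so this shows the acquisition order only:

spinlock_lock(&inactive_as_with_asid_lock);	/* list lock first */
mutex_lock(&as->lock);				/* as_t mutex second, never the other way around */
/* ... work on inactive_as_with_asid_head and the address space ... */
mutex_unlock(&as->lock);
spinlock_unlock(&inactive_as_with_asid_lock);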
 
105,6 → 110,9
void as_init(void)
{
as_arch_init();
as_slab = slab_cache_create("as_slab", sizeof(as_t), 0, NULL, NULL, SLAB_CACHE_MAGDEFERRED);
AS_KERNEL = as_create(FLAG_AS_KERNEL);
if (!AS_KERNEL)
panic("can't create kernel address space\n");
119,7 → 127,7
{
as_t *as;
 
as = (as_t *) malloc(sizeof(as_t), 0);
as = (as_t *) slab_alloc(as_slab, 0);
link_initialize(&as->inactive_as_with_asid_link);
mutex_initialize(&as->lock);
btree_create(&as->as_area_btree);
182,7 → 190,7
 
interrupts_restore(ipl);
free(as);
slab_free(as_slab, as);
}
 
/** Create address space area of common attributes.
798,6 → 806,12
list_append(&old->inactive_as_with_asid_link, &inactive_as_with_asid_head);
}
mutex_unlock(&old->lock);
 
/*
* Perform architecture-specific tasks when the address space
* is being removed from the CPU.
*/
as_deinstall_arch(old);
}
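
Condensed shape of the switch path after this change; sketch only, the matching install happens further down in as_switch() on the incoming address space (called new in as.c):

as_deinstall_arch(old);		/* e.g. sparc64: demap TSBs from DTLB */
/* ... ASID bookkeeping ... */
as_install_arch(new);		/* e.g. sparc64: set ASID, map TSBs */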
 
/*
/trunk/kernel/arch/xen32/include/mm/as.h
48,6 → 48,7
} as_arch_t;
 
#define as_install_arch(as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
 
extern void as_arch_init(void);
/trunk/kernel/arch/sparc64/include/atomic.h
56,7 → 56,7
 
a = *((uint64_t *) x);
b = a + i;
__asm__ volatile ("casx %0, %1, %2\n": "+m" (*((uint64_t *)x)), "+r" (a), "+r" (b));
__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*((uint64_t *)x)), "+r" (b) : "r" (a));
} while (a != b);
 
return a;
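
The rewritten constraints match the SPARC V9 semantics of casx: the memory operand and the destination register b are read-write, while the comparison value a is input-only. In C-like pseudocode:

/*
 * casx [rs1], rs2, rd:
 *	tmp = *(uint64_t *) rs1;
 *	if (tmp == rs2)
 *		*(uint64_t *) rs1 = rd;
 *	rd = tmp;
 *
 * Afterwards b (rd) holds the value memory contained, so b == a
 * means the swap succeeded and the do/while loop exits, returning
 * the pre-increment value in a.
 */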
/trunk/kernel/arch/sparc64/include/mm/tsb.h
36,6 → 36,7
#define KERN_sparc64_TSB_H_
 
#include <arch/mm/tte.h>
#include <arch/mm/mmu.h>
#include <arch/types.h>
#include <typedefs.h>
 
46,16 → 47,67
* again, is nice because TSBs need to be locked
* in TLBs - only one TLB entry will do.
*/
#define ITSB_ENTRY_COUNT 2048
#define DTSB_ENTRY_COUNT 2048
#define TSB_SIZE 2 /* when changing this, change as.c as well */
#define ITSB_ENTRY_COUNT (512*(1<<TSB_SIZE))
#define DTSB_ENTRY_COUNT (512*(1<<TSB_SIZE))
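
Worked numbers for the new sizing, assuming 8 KiB base pages (the same assumption behind the 8*PAGE_SIZE overlap checks in arch/sparc64/src/mm/as.c below):

/*
 * TSB_SIZE == 2
 *	=> ITSB_ENTRY_COUNT == DTSB_ENTRY_COUNT == 512 * (1 << 2) == 2048
 * sizeof(tsb_entry_t) == 16	(8-byte tag + 8-byte data)
 *	=> each TSB occupies 2048 * 16 == 32 KiB
 *	=> ITSB and DTSB back to back == 64 KiB == 8 * PAGE_SIZE,
 *	   i.e. exactly one locked 64K DTLB entry (PAGESIZE_64K).
 */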
 
struct tsb_entry {
tte_tag_t tag;
tte_data_t data;
} __attribute__ ((packed));
 
typedef struct tsb_entry tsb_entry_t;
 
/** TSB Base register. */
union tsb_base_reg {
uint64_t value;
struct {
uint64_t base : 51; /**< TSB base address, bits 63:13. */
unsigned split : 1; /**< Split vs. common TSB for 8K and 64K pages.
* HelenOS uses only 8K pages for user mappings,
* so we always set this to 0.
*/
unsigned : 9;
unsigned size : 3; /**< TSB size. Number of entries is 512*2^size. */
} __attribute__ ((packed));
};
typedef union tsb_base_reg tsb_base_reg_t;
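
Typical use of the union, mirroring as_install_arch() later in this revision; tsb_vaddr is a stand-in name for the TSB's virtual address:

tsb_base_reg_t tsb_base;
tsb_base.value = 0;
tsb_base.size = TSB_SIZE;	/* 512 * 2^size entries */
tsb_base.split = 0;		/* common TSB for 8K and 64K pages */
tsb_base.base = tsb_vaddr >> PAGE_WIDTH;	/* bits 63:13 */
dtsb_base_write(tsb_base.value);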
 
/** Read ITSB Base register.
*
* @return Content of the ITSB Base register.
*/
static inline uint64_t itsb_base_read(void)
{
return asi_u64_read(ASI_IMMU, VA_IMMU_TSB_BASE);
}
 
/** Read DTSB Base register.
*
* @return Content of the DTSB Base register.
*/
static inline uint64_t dtsb_base_read(void)
{
return asi_u64_read(ASI_DMMU, VA_DMMU_TSB_BASE);
}
 
/** Write ITSB Base register.
*
* @param v New content of the ITSB Base register.
*/
static inline void itsb_base_write(uint64_t v)
{
asi_u64_write(ASI_IMMU, VA_IMMU_TSB_BASE, v);
}
 
/** Write DTSB Base register.
*
* @param v New content of the DTSB Base register.
*/
static inline void dtsb_base_write(uint64_t v)
{
asi_u64_write(ASI_DMMU, VA_DMMU_TSB_BASE, v);
}
 
extern void tsb_invalidate(as_t *as, uintptr_t page, count_t pages);
 
#endif
/trunk/kernel/arch/sparc64/src/proc/scheduler.c
120,9 → 120,8
if ((THREAD->flags & THREAD_FLAG_USPACE)) {
/*
* If this thread executes also in userspace, we have to force all
* its still-active userspace windows into the userspace window buffer
* and demap the buffer from DTLB.
* If this thread executes also in userspace, we have to
* demap the userspace window buffer from DTLB.
*/
ASSERT(THREAD->arch.uspace_window_buffer);
/trunk/kernel/arch/sparc64/src/mm/as.c
36,7 → 36,12
#include <arch/mm/tlb.h>
#include <genarch/mm/as_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <debug.h>
 
#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#endif
 
/** Architecture dependent address space init. */
void as_arch_init(void)
{
44,11 → 49,24
asid_fifo_init();
}
 
/** Perform sparc64-specific tasks when an address space becomes active on the processor.
*
* Install ASID and map TSBs.
*
* @param as Address space.
*/
void as_install_arch(as_t *as)
{
tlb_context_reg_t ctx;
/*
* Note that we don't lock the address space.
* That's correct - we can afford it here
* because we only read members that are
* currently read-only.
*/
/*
* Write ASID to secondary context register.
* The primary context register has to be set
* from TL>0 so it will be filled from the
58,7 → 76,78
ctx.v = 0;
ctx.context = as->asid;
mmu_secondary_context_write(ctx.v);
 
#ifdef CONFIG_TSB
if (as != AS_KERNEL) {
uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
 
ASSERT(as->arch.itsb && as->arch.dtsb);
 
uintptr_t tsb = as->arch.itsb;
if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
* to map both TSBs explicitly.
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
dtlb_insert_mapping(tsb, KA2PA(tsb), PAGESIZE_64K, true, true);
}
/*
* Setup TSB Base registers.
*/
tsb_base_reg_t tsb_base;
tsb_base.value = 0;
tsb_base.size = TSB_SIZE;
tsb_base.split = 0;
 
tsb_base.base = as->arch.itsb >> PAGE_WIDTH;
itsb_base_write(tsb_base.value);
tsb_base.base = as->arch.dtsb >> PAGE_WIDTH;
dtsb_base_write(tsb_base.value);
}
#endif
}
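
as_install_arch() (and as_deinstall_arch() below) rely on an overlaps() interval test defined elsewhere in the kernel tree; a sketch of its assumed semantics (illustrative only, not the kernel's definition):

/* True iff [s1, s1 + sz1) and [s2, s2 + sz2) intersect. */
static inline bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
	return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}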
 
/** Perform sparc64-specific tasks when an address space is removed from the processor.
*
* Demap TSBs.
*
* @param as Address space.
*/
void as_deinstall_arch(as_t *as)
{
 
/*
* Note that we don't lock the address space.
* That's correct - we can afford it here
* because we only read members that are
* currently read-only.
*/
 
#ifdef CONFIG_TSB
if (as != AS_KERNEL) {
uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
 
ASSERT(as->arch.itsb && as->arch.dtsb);
 
uintptr_t tsb = as->arch.itsb;
if (!overlaps(tsb, 8*PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
* to demap the entry installed by as_install_arch().
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
}
}
#endif
}
 
/** @}
*/
/trunk/kernel/arch/ia64/include/mm/as.h
47,6 → 47,7
typedef struct {
} as_arch_t;
 
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
 
extern void as_arch_init(void);
/trunk/kernel/arch/ppc32/include/mm/as.h
47,6 → 47,7
typedef struct {
} as_arch_t;
 
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
 
extern void as_arch_init(void);
/trunk/kernel/arch/amd64/include/mm/as.h
48,6 → 48,7
} as_arch_t;
 
#define as_install_arch(as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
 
extern void as_arch_init(void);
/trunk/kernel/arch/ppc64/include/mm/as.h
48,6 → 48,7
} as_arch_t;
 
#define as_install_arch(as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
 
extern void as_arch_init(void);
/trunk/kernel/arch/mips32/include/mm/as.h
47,6 → 47,7
typedef struct {
} as_arch_t;
 
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
 
extern void as_arch_init(void);
/trunk/kernel/arch/ia32/include/mm/as.h
48,6 → 48,7
} as_arch_t;
 
#define as_install_arch(as)
#define as_deinstall_arch(as)
#define as_invalidate_translation_cache(as, page, cnt)
 
extern void as_arch_init(void);
/trunk/uspace/libc/arch/sparc64/include/atomic.h
51,11 → 51,9
uint64_t a, b;
 
do {
volatile uintptr_t x = (uint64_t) &val->count;
 
a = *((uint64_t *) x);
a = val->count;
b = a + i;
__asm__ volatile ("casx %0, %1, %2\n": "+m" (*((uint64_t *)x)), "+r" (a), "+r" (b));
__asm__ volatile ("casx %0, %2, %1\n" : "+m" (*val), "+r" (b) : "r" (a));
} while (a != b);
 
return a;
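
The corrected loop implements a 64-bit atomic fetch-and-add and returns the pre-increment value. For illustration only, the same operation expressed with a GCC builtin, which this libc code does not actually use:

static inline uint64_t fetch_and_add(volatile uint64_t *cnt, uint64_t i)
{
	/* returns the value *cnt held before the addition */
	return __sync_fetch_and_add(cnt, i);
}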