Subversion Repositories HelenOS

Compare Revisions

Rev 2139 → Rev 2141

/trunk/kernel/genarch/src/fb/fb.c
511,8 → 511,6
sysinfo_set_item_val("fb.scanline", NULL, scan);
sysinfo_set_item_val("fb.visual", NULL, visual);
sysinfo_set_item_val("fb.address.physical", NULL, addr);
sysinfo_set_item_val("fb.address.color", NULL,
PAGE_COLOR((uintptr_t) fbaddress));
sysinfo_set_item_val("fb.invert-colors", NULL, invert_colors);
 
/* Allocate double buffer */
/trunk/kernel/genarch/src/mm/page_ht.c
55,7 → 55,8
static bool compare(unative_t key[], count_t keys, link_t *item);
static void remove_callback(link_t *item);
 
static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags);
static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
int flags);
static void ht_mapping_remove(as_t *as, uintptr_t page);
static pte_t *ht_mapping_find(as_t *as, uintptr_t page);
 
103,7 → 104,7
* of occurring. Least significant bits of VPN compose the
* hash index.
*/
index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES-1));
index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));
/*
* Address space structures are likely to be allocated from
110,7 → 111,7
* similar addresses. Least significant bits compose the
* hash index.
*/
index |= ((unative_t) as) & (PAGE_HT_ENTRIES-1);
index |= ((unative_t) as) & (PAGE_HT_ENTRIES - 1);
return index;
}
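
For context, the two hunks above change only whitespace in the masking expressions; pieced together, the hash that page_ht.c computes looks roughly like the sketch below. The stub typedefs and the PAGE_HT_ENTRIES value are assumptions made so the fragment compiles standalone, not the kernel's real definitions.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

/* Local stand-ins for kernel types and constants (assumptions, for
   illustration only; the real definitions live elsewhere in the kernel). */
typedef uintptr_t unative_t;
typedef size_t index_t;

#define PAGE_WIDTH      14
#define PAGE_HT_ENTRIES (1 << 13)
#define KEY_AS          0
#define KEY_PAGE        1

static index_t hash(unative_t key[])
{
    unative_t as = key[KEY_AS];
    uintptr_t page = (uintptr_t) key[KEY_PAGE];
    index_t index;

    /* Least significant bits of the VPN compose the hash index. */
    index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));

    /* Mix in the address space pointer so that mappings from
       different address spaces spread across the table. */
    index |= as & (PAGE_HT_ENTRIES - 1);

    return index;
}

int main(void)
{
    unative_t key[2] = { (unative_t) 0xdeadb000, 0x40004000 };

    printf("index: %zu\n", hash(key));
    return 0;
}
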
136,7 → 137,8
t = hash_table_get_instance(item, pte_t, link);
 
if (keys == PAGE_HT_KEYS) {
return (key[KEY_AS] == (uintptr_t) t->as) && (key[KEY_PAGE] == t->page);
return (key[KEY_AS] == (uintptr_t) t->as) &&
(key[KEY_PAGE] == t->page);
} else {
return (key[KEY_AS] == (uintptr_t) t->as);
}
175,7 → 177,10
void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
pte_t *t;
unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
unative_t key[2] = {
(uintptr_t) as,
page = ALIGN_DOWN(page, PAGE_SIZE)
};
if (!hash_table_find(&page_ht, key)) {
t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
209,7 → 214,10
*/
void ht_mapping_remove(as_t *as, uintptr_t page)
{
unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
unative_t key[2] = {
(uintptr_t) as,
page = ALIGN_DOWN(page, PAGE_SIZE)
};
/*
* Note that removed PTE's will be freed
234,7 → 242,10
{
link_t *hlp;
pte_t *t = NULL;
unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
unative_t key[2] = {
(uintptr_t) as,
page = ALIGN_DOWN(page, PAGE_SIZE)
};
hlp = hash_table_find(&page_ht, key);
if (hlp)
/trunk/kernel/generic/src/time/clock.c
104,8 → 104,6
* physmem_map() the clock_parea.
*/
sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
sysinfo_set_item_val("clock.fcolor", NULL, (unative_t)
PAGE_COLOR(clock_parea.vbase));
sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}
 
/trunk/kernel/generic/src/ddi/ddi.c
99,8 → 99,7
* @return 0 on success, EPERM if the caller lacks capabilities to use this
* syscall, ENOENT if there is no task matching the specified ID or the
* physical address space is not enabled for mapping and ENOMEM if there
* was a problem in creating address space area. ENOTSUP is returned when
* an attempt to create an illegal address alias is detected.
* was a problem in creating address space area.
*/
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
{
139,18 → 138,6
interrupts_restore(ipl);
return ENOENT;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) {
/*
* Refuse to create an illegal address alias.
*/
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
return ENOTSUP;
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
spinlock_unlock(&parea_lock);
 
spinlock_lock(&TASK->lock);
/trunk/kernel/generic/src/console/klog.c
90,8 → 90,6
ddi_parea_register(&klog_parea);
 
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
sysinfo_set_item_val("klog.fcolor", NULL, (unative_t)
PAGE_COLOR((uintptr_t) klog));
sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER);
sysinfo_set_item_val("klog.devno", NULL, devno);
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR);
/trunk/kernel/generic/src/lib/rd.c
90,8 → 90,6
sysinfo_set_item_val("rd.size", NULL, dsize);
sysinfo_set_item_val("rd.address.physical", NULL, (unative_t)
KA2PA((void *) header + hsize));
sysinfo_set_item_val("rd.address.color", NULL, (unative_t)
PAGE_COLOR((uintptr_t) header + hsize));
 
return RE_OK;
}
/trunk/kernel/generic/src/mm/backend_anon.c
157,21 → 157,6
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
#ifdef CONFIG_VIRT_IDX_DCACHE
if (dirty && PAGE_COLOR(PA2KA(frame)) != PAGE_COLOR(addr)) {
/*
* By writing to the frame using kernel virtual address,
* we have created an illegal virtual alias. We now have to
* invalidate cachelines belonging to addr on all processors
* so that they will be reloaded with the new content on next
* read.
*/
dcache_flush_frame(addr, frame);
dcache_shootdown_start(DCACHE_INVL_FRAME, PAGE_COLOR(addr), frame);
dcache_shootdown_finalize();
}
#endif
 
return AS_PF_OK;
}
 
240,4 → 225,3
 
/** @}
*/
 
/trunk/kernel/generic/src/mm/as.c
613,8 → 613,7
* such address space area, EPERM if there was a problem in accepting the area
* or ENOMEM if there was a problem in allocating destination address space
* area. ENOTSUP is returned if the address space area backend does not support
* sharing or if the kernel detects an attempt to create an illegal address
* alias.
* sharing.
*/
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
667,20 → 666,6
return EPERM;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (!(dst_flags_mask & AS_AREA_EXEC)) {
if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
/*
* Refuse to create an illegal address alias.
*/
mutex_unlock(&src_area->lock);
mutex_unlock(&src_as->lock);
interrupts_restore(ipl);
return ENOTSUP;
}
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/*
* Now we are committed to sharing the area.
* First, prepare the area for sharing.
901,9 → 886,9
* list of inactive address spaces with assigned
* ASID.
*/
ASSERT(old_as->asid != ASID_INVALID);
list_append(&old_as->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
ASSERT(old_as->asid != ASID_INVALID);
list_append(&old_as->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
}
mutex_unlock(&old_as->lock);
 
/trunk/kernel/generic/src/mm/backend_elf.c
209,21 → 209,6
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (dirty && PAGE_COLOR(PA2KA(frame)) != PAGE_COLOR(addr)) {
/*
* By writing to the frame using kernel virtual address,
* we have created an illegal virtual alias. We now have to
* invalidate cachelines belonging to addr on all processors
* so that they will be reloaded with the new content on next
* read.
*/
dcache_flush_frame(addr, frame);
dcache_shootdown_start(DCACHE_INVL_FRAME, PAGE_COLOR(addr), frame);
dcache_shootdown_finalize();
}
#endif
 
return AS_PF_OK;
}
 
355,4 → 340,3
 
/** @}
*/
 
/trunk/kernel/arch/sparc64/include/stack.h
43,7 → 43,7
/**
* 16-extended-word save area for %i[0-7] and %l[0-7] registers.
*/
#define STACK_WINDOW_SAVE_AREA_SIZE (16*STACK_ITEM_SIZE)
#define STACK_WINDOW_SAVE_AREA_SIZE (16 * STACK_ITEM_SIZE)
 
/**
* By convention, the actual top of the stack is %sp + STACK_BIAS.
/trunk/kernel/arch/sparc64/include/mm/frame.h
35,7 → 35,20
#ifndef KERN_sparc64_FRAME_H_
#define KERN_sparc64_FRAME_H_
 
#define FRAME_WIDTH 13 /* 8K */
/*
* Page size supported by the MMU.
* With 8K pages, the nasty illegal virtual aliasing problem arises.
* Therefore, the kernel uses 8K pages only internally, on the TLB and TSB
* levels.
*/
#define MMU_FRAME_WIDTH 13 /* 8K */
#define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH)
 
/*
* Page size exported to the generic memory management subsystems.
* This page size is not directly supported by the MMU, but we can emulate
* each 16K page with a pair of adjacent 8K pages.
*/
#define FRAME_WIDTH 14 /* 16K */
#define FRAME_SIZE (1 << FRAME_WIDTH)
 
#ifdef KERNEL
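
The intent of the new split is a simple invariant: one generic 16K frame is exactly two adjacent 8K MMU frames. A minimal standalone check, with the constants duplicated locally since the kernel header cannot be included here:

#include <assert.h>

#define MMU_FRAME_WIDTH 13  /* 8K, the size the MMU really supports */
#define MMU_FRAME_SIZE  (1 << MMU_FRAME_WIDTH)
#define FRAME_WIDTH     14  /* 16K, the size exported to generic mm */
#define FRAME_SIZE      (1 << FRAME_WIDTH)

int main(void)
{
    /* Each 16K generic frame is emulated by two adjacent 8K MMU frames. */
    assert(FRAME_SIZE == 2 * MMU_FRAME_SIZE);
    return 0;
}
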
/trunk/kernel/arch/sparc64/include/mm/page.h
37,11 → 37,27
 
#include <arch/mm/frame.h>
 
/*
* On the TLB and TSB level, we still use 8K pages, which are supported by the
* MMU.
*/
#define MMU_PAGE_WIDTH MMU_FRAME_WIDTH
#define MMU_PAGE_SIZE MMU_FRAME_SIZE
 
/*
* On the page table level, we use 16K pages. 16K pages are not supported by
* the MMU but we emulate them with pairs of 8K pages.
*/
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 1 /**< 14 - 13; 2^14 == 16K == alias boundary. */
#define MMU_PAGES_PER_PAGE (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH))
 
/*
* With 16K pages, there is only one page color.
*/
#define PAGE_COLOR_BITS 0 /**< 14 - 14; 2^14 == 16K == alias boundary. */
 
#ifdef KERNEL
 
#ifndef __ASM__
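
Since PAGE_WIDTH - MMU_PAGE_WIDTH is 1, MMU_PAGES_PER_PAGE evaluates to 2, and the 16K alias boundary now coincides with the page size, which is why PAGE_COLOR_BITS drops from 1 to 0. A standalone sketch of the subpage-index arithmetic that the TLB miss handlers rely on (constants duplicated from the headers above; the address is made up):

#include <stdio.h>
#include <stdint.h>

#define MMU_PAGE_WIDTH      13
#define PAGE_WIDTH          14
#define MMU_PAGES_PER_PAGE  (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH))  /* 2 */

int main(void)
{
    uintptr_t va = 0x40007000;  /* hypothetical faulting address */

    /* Which 8K half of its 16K page does va fall into? (0 or 1) */
    unsigned index = (unsigned) ((va >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE);

    printf("subpage index: %u\n", index);  /* 1 for this address */
    return 0;
}
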
/trunk/kernel/arch/sparc64/include/mm/as.h
81,10 → 81,11
#include <genarch/mm/as_ht.h>
 
#ifdef CONFIG_TSB
# include <arch/mm/tsb.h>
# define as_invalidate_translation_cache(as, page, cnt) tsb_invalidate(as, page, cnt)
#include <arch/mm/tsb.h>
#define as_invalidate_translation_cache(as, page, cnt) \
tsb_invalidate((as), (page), (cnt))
#else
# define as_invalidate_translation_cache(as, page, cnt)
#define as_invalidate_translation_cache(as, page, cnt)
#endif
 
extern void as_arch_init(void);
/trunk/kernel/arch/sparc64/include/mm/cache.h
43,44 → 43,10
#define dcache_flush_frame(p, f) \
dcache_flush_tag(PAGE_COLOR((p)), ADDR2PFN((f)));
 
/**
* Enumerations to differentiate among different scopes of D-Cache
* invalidation.
*/
typedef enum {
DCACHE_INVL_INVALID,
DCACHE_INVL_ALL,
DCACHE_INVL_COLOR,
DCACHE_INVL_FRAME
} dcache_invalidate_type_t;
 
/**
* Number of messages that can be queued in the cpu_arch_t structure at a time.
*/
#define DCACHE_MSG_QUEUE_LEN 10
 
/** D-cache shootdown message type. */
typedef struct {
dcache_invalidate_type_t type;
int color;
uintptr_t frame;
} dcache_shootdown_msg_t;
 
extern void dcache_flush(void);
extern void dcache_flush_color(int c);
extern void dcache_flush_tag(int c, pfn_t tag);
 
#ifdef CONFIG_SMP
extern void dcache_shootdown_start(dcache_invalidate_type_t type, int color,
uintptr_t frame);
extern void dcache_shootdown_finalize(void);
extern void dcache_shootdown_ipi_recv(void);
#else
#define dcache_shootdown_start(t, c, f)
#define dcache_shootdown_finalize()
#define dcache_shootdown_ipi_recv()
#endif /* CONFIG_SMP */
 
#endif
 
/** @}
/trunk/kernel/arch/sparc64/include/mm/tsb.h
112,8 → 112,8
struct pte;
 
extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);
extern void itsb_pte_copy(struct pte *t);
extern void dtsb_pte_copy(struct pte *t, bool ro);
extern void itsb_pte_copy(struct pte *t, index_t index);
extern void dtsb_pte_copy(struct pte *t, index_t index, bool ro);
 
#endif /* !def __ASM__ */
 
/trunk/kernel/arch/sparc64/include/cpu.h
64,11 → 64,6
uint64_t next_tick_cmpr; /**< Next clock interrupt should be
generated when the TICK register
matches this value. */
#ifdef CONFIG_SMP
int dcache_active;
dcache_shootdown_msg_t dcache_messages[DCACHE_MSG_QUEUE_LEN];
count_t dcache_message_count;
#endif
} cpu_arch_t;
#endif
/trunk/kernel/arch/sparc64/Makefile.inc
83,8 → 83,7
arch/$(ARCH)/src/fpu_context.c \
arch/$(ARCH)/src/dummy.s \
arch/$(ARCH)/src/mm/as.c \
arch/$(ARCH)/src/mm/cache.c \
arch/$(ARCH)/src/mm/cache_asm.S \
arch/$(ARCH)/src/mm/cache.S \
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c \
/trunk/kernel/arch/sparc64/src/smp/ipi.c
39,7 → 39,6
#include <arch/asm.h>
#include <config.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <arch/interrupt.h>
#include <arch/trap/interrupt.h>
#include <arch/barrier.h>
125,11 → 124,6
case IPI_TLB_SHOOTDOWN:
func = tlb_shootdown_ipi_recv;
break;
#if (defined(CONFIG_SMP) && (defined(CONFIG_VIRT_IDX_DCACHE)))
case IPI_DCACHE_SHOOTDOWN:
func = dcache_shootdown_ipi_recv;
break;
#endif
default:
panic("Unknown IPI (%d).\n", ipi);
break;
/trunk/kernel/arch/sparc64/src/trap/interrupt.c
44,7 → 44,6
#include <print.h>
#include <arch.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <config.h>
#include <synch/spinlock.h>
 
91,10 → 90,6
#ifdef CONFIG_SMP
if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
tlb_shootdown_ipi_recv();
#ifdef CONFIG_VIRT_IDX_DCACHE
} else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) {
dcache_shootdown_ipi_recv();
#endif
}
#endif
} else {
/trunk/kernel/arch/sparc64/src/cpu/cpu.c
51,11 → 51,6
upa_config.value = upa_config_read();
CPU->arch.mid = upa_config.mid;
#if (defined(CONFIG_SMP) && defined(CONFIG_VIRT_IDX_DCACHE))
CPU->arch.dcache_active = 1;
CPU->arch.dcache_message_count = 0;
#endif
 
/*
* Detect processor frequency.
*/
/trunk/kernel/arch/sparc64/src/mm/cache_asm.S
File deleted
/trunk/kernel/arch/sparc64/src/mm/cache.c
File deleted
/trunk/kernel/arch/sparc64/src/mm/tlb.c
54,14 → 54,14
#include <arch/mm/tsb.h>
#endif
 
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const
char *str);
static void dtlb_pte_copy(pte_t *t, index_t index, bool ro);
static void itlb_pte_copy(pte_t *t, index_t index);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
tlb_tag_access_reg_t tag, const char *str);
 
char *context_encoding[] = {
"Primary",
92,8 → 92,8
* @param locked True for permanent mappings, false otherwise.
* @param cacheable True if the mapping is cacheable, false otherwise.
*/
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool
locked, bool cacheable)
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
bool locked, bool cacheable)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
126,11 → 126,12
 
/** Copy PTE to TLB.
*
* @param t Page Table Entry to be copied.
* @param ro If true, the entry will be created read-only, regardless of its w
* field.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the entry will be created read-only, regardless of its
* w field.
*/
void dtlb_pte_copy(pte_t *t, bool ro)
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
137,15 → 138,15
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
pg.address = t->page + (index << MMU_PAGE_WIDTH);
fr.address = t->frame + (index << MMU_PAGE_WIDTH);
 
tag.value = 0;
tag.context = t->as->asid;
tag.vpn = pg.vpn;
 
dtlb_tag_access_write(tag.value);
 
data.value = 0;
data.v = true;
data.size = PAGESIZE_8K;
158,15 → 159,16
data.p = t->k; /* p like privileged */
data.w = ro ? false : t->w;
data.g = t->g;
 
dtlb_data_in_write(data.value);
}
 
/** Copy PTE to ITLB.
*
* @param t Page Table Entry to be copied.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itlb_pte_copy(pte_t *t)
void itlb_pte_copy(pte_t *t, index_t index)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
173,8 → 175,8
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
pg.address = t->page + (index << MMU_PAGE_WIDTH);
fr.address = t->frame + (index << MMU_PAGE_WIDTH);
 
tag.value = 0;
tag.context = t->as->asid;
199,6 → 201,7
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
{
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
pte_t *t;
 
page_table_lock(AS, true);
209,9 → 212,9
* Insert it into ITLB.
*/
t->a = true;
itlb_pte_copy(t);
itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
itsb_pte_copy(t);
itsb_pte_copy(t, index);
#endif
page_table_unlock(AS, true);
} else {
222,7 → 225,7
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
do_fast_instruction_access_mmu_miss_fault(istate,
__FUNCTION__);
__FUNCTION__);
}
}
}
236,19 → 239,21
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
va = tag.vpn << PAGE_WIDTH;
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE;
 
if (tag.context == ASID_KERNEL) {
if (!tag.vpn) {
/* NULL access in kernel */
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
"kernel page fault.");
"kernel page fault.");
}
 
page_table_lock(AS, true);
259,19 → 264,20
* Insert it into DTLB.
*/
t->a = true;
dtlb_pte_copy(t, true);
dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, true);
dtsb_pte_copy(t, index, true);
#endif
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to the address space page fault handler.
* Forward the page fault to the address space page fault
* handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
}
}
281,10 → 287,12
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
va = tag.vpn << PAGE_WIDTH;
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
296,10 → 304,11
*/
t->a = true;
t->d = true;
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
dtlb_pte_copy(t, false);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
va + index * MMU_PAGE_SIZE);
dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, false);
dtsb_pte_copy(t, index, false);
#endif
page_table_unlock(AS, true);
} else {
310,7 → 319,7
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
do_fast_data_access_protection_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
}
}
328,10 → 337,10
t.value = itlb_tag_read_read(i);
 
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
printf("D-TLB contents:\n");
340,16 → 349,16
t.value = dtlb_tag_read_read(i);
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
}
 
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char
*str)
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str)
{
fault_if_from_uspace(istate, "%s\n", str);
dump_istate(istate);
356,29 → 365,29
panic("%s\n", str);
}
 
void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
va = tag.vpn << MMU_PAGE_WIDTH;
 
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
tag.context);
dump_istate(istate);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
panic("%s\n", str);
}
 
void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
va = tag.vpn << MMU_PAGE_WIDTH;
 
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
tag.context);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
dump_istate(istate);
panic("%s\n", str);
393,8 → 402,8
sfar = dtlb_sfar_read();
printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
printf("DTLB SFAR: address=%p\n", sfar);
dtlb_sfsr_write(0);
481,11 → 490,11
ctx.context = asid;
mmu_primary_context_write(ctx.v);
for (i = 0; i < cnt; i++) {
for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * PAGE_SIZE);
page + i * MMU_PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * PAGE_SIZE);
page + i * MMU_PAGE_SIZE);
}
mmu_primary_context_write(pc_save.v);
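
The pattern repeated throughout the handlers above is that one 16K software PTE is installed as two 8K hardware entries, with both the virtual and the physical address offset by index << MMU_PAGE_WIDTH. Schematically, as a standalone sketch in which pte_sketch_t is a reduced stand-in for the kernel's pte_t and the addresses are made up:

#include <stdio.h>
#include <stdint.h>

#define MMU_PAGE_WIDTH 13

/* Reduced stand-in for the kernel's pte_t (assumption, illustration only). */
typedef struct {
    uintptr_t page;   /* 16K-aligned virtual address */
    uintptr_t frame;  /* 16K-aligned physical address */
} pte_sketch_t;

/* The 8K subpage/subframe pair that index selects within the 16K PTE. */
static void tlb_entry_for(const pte_sketch_t *t, unsigned index,
    uintptr_t *pg, uintptr_t *fr)
{
    *pg = t->page + ((uintptr_t) index << MMU_PAGE_WIDTH);
    *fr = t->frame + ((uintptr_t) index << MMU_PAGE_WIDTH);
}

int main(void)
{
    pte_sketch_t t = { 0x40000000, 0x08000000 };
    unsigned i;

    for (i = 0; i < 2; i++) {
        uintptr_t pg, fr;
        tlb_entry_for(&t, i, &pg, &fr);
        printf("entry %u: va=%#lx pa=%#lx\n", i,
            (unsigned long) pg, (unsigned long) fr);
    }
    return 0;
}
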
/trunk/kernel/arch/sparc64/src/mm/as.c
62,7 → 62,7
{
#ifdef CONFIG_TSB
int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH);
sizeof(tsb_entry_t)) >> MMU_FRAME_WIDTH);
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
 
if (!tsb)
71,8 → 71,8
as->arch.itsb = (tsb_entry_t *) tsb;
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
sizeof(tsb_entry_t));
memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
* sizeof(tsb_entry_t), 0);
memsetb((uintptr_t) as->arch.itsb,
(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
return 0;
}
81,7 → 81,7
{
#ifdef CONFIG_TSB
count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH;
sizeof(tsb_entry_t)) >> MMU_FRAME_WIDTH;
frame_free(KA2PA((uintptr_t) as->arch.itsb));
return cnt;
#else
139,7 → 139,7
 
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
158,9 → 158,9
tsb_base.size = TSB_SIZE;
tsb_base.split = 0;
 
tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
itsb_base_write(tsb_base.value);
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
dtsb_base_write(tsb_base.value);
#endif
}
189,7 → 189,7
 
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
/trunk/kernel/arch/sparc64/src/mm/cache.S
0,0 → 1,91
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/arch.h>
 
#define DCACHE_SIZE (16 * 1024)
#define DCACHE_LINE_SIZE 32
 
#define DCACHE_TAG_SHIFT 2
 
.register %g2, #scratch
.register %g3, #scratch
 
/** Flush the whole D-cache. */
.global dcache_flush
dcache_flush:
set (DCACHE_SIZE - DCACHE_LINE_SIZE), %g1
stxa %g0, [%g1] ASI_DCACHE_TAG
0: membar #Sync
subcc %g1, DCACHE_LINE_SIZE, %g1
bnz,pt %xcc, 0b
stxa %g0, [%g1] ASI_DCACHE_TAG
retl
membar #Sync
 
/** Flush only D-cache lines of one virtual color.
*
* @param o0 Virtual color to be flushed.
*/
.global dcache_flush_color
dcache_flush_color:
mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1
set DCACHE_SIZE / 2, %g2
sllx %g2, %o0, %g2
sub %g2, DCACHE_LINE_SIZE, %g2
0: stxa %g0, [%g2] ASI_DCACHE_TAG
membar #Sync
subcc %g1, 1, %g1
bnz,pt %xcc, 0b
sub %g2, DCACHE_LINE_SIZE, %g2
retl
nop
 
/** Flush only D-cache lines of one virtual color and one tag.
*
* @param o0 Virtual color to lookup the tag.
* @param o1 Tag of the cachelines to be flushed.
*/
.global dcache_flush_tag
dcache_flush_tag:
mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1
set DCACHE_SIZE / 2, %g2
sllx %g2, %o0, %g2
sub %g2, DCACHE_LINE_SIZE, %g2
0: ldxa [%g2] ASI_DCACHE_TAG, %g3
srlx %g3, DCACHE_TAG_SHIFT, %g3
cmp %g3, %o1
bnz 1f
nop
stxa %g0, [%g2] ASI_DCACHE_TAG
membar #Sync
1: subcc %g1, 1, %g1
bnz,pt %xcc, 0b
sub %g2, DCACHE_LINE_SIZE, %g2
retl
nop
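
In C terms, dcache_flush_tag walks the 8K half of the direct-mapped D-cache selected by the color, top down, and invalidates every line whose tag matches. A control-flow sketch only: the ldxa/stxa accesses to ASI_DCACHE_TAG have no portable C equivalent, so read_dcache_tag() and invalidate_dcache_line() below are hypothetical stand-ins.

#include <stdint.h>

#define DCACHE_SIZE      (16 * 1024)
#define DCACHE_LINE_SIZE 32
#define DCACHE_TAG_SHIFT 2

/* Hypothetical stand-ins for the ASI_DCACHE_TAG load and store. */
extern uint64_t read_dcache_tag(uintptr_t line);
extern void invalidate_dcache_line(uintptr_t line);

void dcache_flush_tag_sketch(int color, uint64_t tag)
{
    /* The color selects which 8K half of the cache to walk. */
    uintptr_t line = ((uintptr_t) (DCACHE_SIZE / 2) << color)
        - DCACHE_LINE_SIZE;
    unsigned n;

    for (n = (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2; n > 0; n--) {
        if ((read_dcache_tag(line) >> DCACHE_TAG_SHIFT) == tag)
            invalidate_dcache_line(line);
        line -= DCACHE_LINE_SIZE;
    }
}
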
/trunk/kernel/arch/sparc64/src/mm/tsb.c
34,6 → 34,7
 
#include <arch/mm/tsb.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <mm/as.h>
#include <arch/types.h>
40,7 → 41,7
#include <macros.h>
#include <debug.h>
 
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
 
/** Invalidate portion of TSB.
*
59,28 → 60,31
ASSERT(as->arch.itsb && as->arch.dtsb);
i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK;
cnt = min(pages, ITSB_ENTRY_COUNT);
i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
cnt = min(pages * MMU_PAGES_PER_PAGE, ITSB_ENTRY_COUNT);
for (i = 0; i < cnt; i++) {
as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
true;
true;
as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
true;
true;
}
}
 
/** Copy software PTE to ITSB.
*
* @param t Software PTE.
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itsb_pte_copy(pte_t *t)
void itsb_pte_copy(pte_t *t, index_t index)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
as = t->as;
tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
tsb = &as->arch.itsb[entry];
 
/*
* We use write barriers to make sure that the TSB load
95,10 → 99,11
write_barrier();
 
tsb->tag.context = as->asid;
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
tsb->data.p = t->k; /* p as privileged */
tsb->data.v = t->p;
110,16 → 115,19
 
/** Copy software PTE to DTSB.
*
* @param t Software PTE.
* @param ro If true, the mapping is copied read-only.
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the mapping is copied read-only.
*/
void dtsb_pte_copy(pte_t *t, bool ro)
void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
as = t->as;
tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
tsb = &as->arch.dtsb[entry];
 
/*
* We use write barriers to make sure that the TSB load
134,10 → 142,11
write_barrier();
 
tsb->tag.context = as->asid;
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
tsb->data.cv = t->c;
/trunk/kernel/arch/sparc64/src/mm/page.c
73,9 → 73,9
*/
for (i = 0; i < bsp_locked_dtlb_entries; i++) {
dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
}
#endif
 
107,26 → 107,26
size_t increment;
count_t count;
} sizemap[] = {
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * PAGE_SIZE, 2 } /* 8M */
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 } /* 8M */
};
ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
ASSERT(size <= 8 * 1024 * 1024);
if (size <= FRAME_SIZE)
if (size <= MMU_FRAME_SIZE)
order = 0;
else
order = (fnzb64(size - 1) + 1) - FRAME_WIDTH;
order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;
 
/*
* Use virtual addresses that are beyond the limit of physical memory.
134,8 → 134,10
* by frame_alloc().
*/
ASSERT(PA2KA(last_frame));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame), 1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size, 1 << (order + FRAME_WIDTH));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
1 << (order + FRAME_WIDTH));
for (i = 0; i < sizemap[order].count; i++) {
/*
142,8 → 144,8
* First, insert the mapping into DTLB.
*/
dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
#ifdef CONFIG_SMP
/*
150,11 → 152,11
* Second, save the information about the mapping for APs.
*/
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
virtaddr + i * sizemap[order].increment;
virtaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
physaddr + i * sizemap[order].increment;
physaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
sizemap[order].pagesize_code;
sizemap[order].pagesize_code;
bsp_locked_dtlb_entries++;
#endif
}
/trunk/uspace/ns/ns.c
83,10 → 83,9
static void *clockaddr = NULL;
static void *klogaddr = NULL;
 
static void get_as_area(ipc_callid_t callid, ipc_call_t *call, char *name, char *colstr, void **addr)
static void get_as_area(ipc_callid_t callid, ipc_call_t *call, char *name, void **addr)
{
void *ph_addr;
int ph_color;
 
if (!*addr) {
ph_addr = (void *) sysinfo_value(name);
94,8 → 93,7
ipc_answer_fast(callid, ENOENT, 0, 0);
return;
}
ph_color = (int) sysinfo_value(colstr);
*addr = as_get_mappable_page(PAGE_SIZE, ph_color);
*addr = as_get_mappable_page(PAGE_SIZE);
physmem_map(ph_addr, *addr, 1, AS_AREA_READ | AS_AREA_CACHEABLE);
}
ipc_answer_fast(callid, 0, (ipcarg_t) *addr, AS_AREA_READ);
119,11 → 117,11
switch (IPC_GET_ARG3(call)) {
case SERVICE_MEM_REALTIME:
get_as_area(callid, &call, "clock.faddr",
"clock.fcolor", &clockaddr);
&clockaddr);
break;
case SERVICE_MEM_KLOG:
get_as_area(callid, &call, "klog.faddr",
"klog.fcolor", &klogaddr);
&klogaddr);
break;
default:
ipc_answer_fast(callid, ENOENT, 0, 0);
/trunk/uspace/fb/main.c
43,8 → 43,7
{
void *dest;
 
dest = as_get_mappable_page(IPC_GET_ARG2(*call),
PAGE_COLOR(IPC_GET_ARG1(*call)));
dest = as_get_mappable_page(IPC_GET_ARG2(*call));
if (ipc_answer_fast(callid, 0, (sysarg_t) dest, 0) == 0) {
if (*area)
as_area_destroy(*area);
/trunk/uspace/fb/fb.c
755,8 → 755,7
case IPC_M_AS_AREA_SEND:
/* We accept one area for data interchange */
if (IPC_GET_ARG1(*call) == shm_id) {
void *dest = as_get_mappable_page(IPC_GET_ARG2(*call),
PAGE_COLOR(IPC_GET_ARG1(*call)));
void *dest = as_get_mappable_page(IPC_GET_ARG2(*call));
shm_size = IPC_GET_ARG2(*call);
if (!ipc_answer_fast(callid, 0, (sysarg_t) dest, 0))
shm = dest;
1369,8 → 1368,7
fb_invert_colors = sysinfo_value("fb.invert-colors");
 
asz = fb_scanline * fb_height;
fb_addr = as_get_mappable_page(asz, (int)
sysinfo_value("fb.address.color"));
fb_addr = as_get_mappable_page(asz);
physmem_map(fb_ph_addr, fb_addr, ALIGN_UP(asz, PAGE_SIZE) >>
PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE);
/trunk/uspace/fb/ega.c
315,8 → 315,7
iospace_enable(task_get_id(), (void *) EGA_IO_ADDRESS, 2);
 
sz = scr_width * scr_height * 2;
scr_addr = as_get_mappable_page(sz, (int)
sysinfo_value("fb.address.color"));
scr_addr = as_get_mappable_page(sz);
 
physmem_map(ega_ph_addr, scr_addr, ALIGN_UP(sz, PAGE_SIZE) >>
PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE);
/trunk/uspace/klog/klog.c
62,7 → 62,7
 
printf("Kernel console output.\n");
mapping = as_get_mappable_page(PAGE_SIZE, sysinfo_value("klog.fcolor"));
mapping = as_get_mappable_page(PAGE_SIZE);
res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV,
(sysarg_t) mapping, PAGE_SIZE, SERVICE_MEM_KLOG,
NULL, NULL, NULL);
/trunk/uspace/rd/rd.c
73,12 → 73,11
{
size_t rd_size = sysinfo_value("rd.size");
void * rd_ph_addr = (void *) sysinfo_value("rd.address.physical");
int rd_color = (int) sysinfo_value("rd.address.color");
if (rd_size == 0)
return false;
void * rd_addr = as_get_mappable_page(rd_size, rd_color);
void * rd_addr = as_get_mappable_page(rd_size);
physmem_map(rd_ph_addr, rd_addr, ALIGN_UP(rd_size, PAGE_SIZE) >> PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE);
/trunk/uspace/libc/include/as.h
40,13 → 40,11
#include <kernel/mm/as.h>
#include <libarch/config.h>
 
#define PAGE_COLOR(va) (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1))
 
extern void *as_area_create(void *address, size_t size, int flags);
extern int as_area_resize(void *address, size_t size, int flags);
extern int as_area_destroy(void *address);
extern void *set_maxheapsize(size_t mhs);
extern void * as_get_mappable_page(size_t sz, int color);
extern void * as_get_mappable_page(size_t sz);
 
#endif
 
/trunk/uspace/libc/generic/time.c
72,12 → 72,11
int res;
 
if (!ktime) {
mapping = as_get_mappable_page(PAGE_SIZE, (int)
sysinfo_value("clock.fcolor"));
mapping = as_get_mappable_page(PAGE_SIZE);
/* Get the mapping of kernel clock */
res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV, (sysarg_t)
mapping, PAGE_SIZE, SERVICE_MEM_REALTIME, NULL, &rights,
NULL);
res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV,
(sysarg_t) mapping, PAGE_SIZE, SERVICE_MEM_REALTIME, NULL,
&rights, NULL);
if (res) {
printf("Failed to initialize timeofday memarea\n");
_exit(1);
/trunk/uspace/libc/generic/as.c
55,7 → 55,7
void *as_area_create(void *address, size_t size, int flags)
{
return (void *) __SYSCALL3(SYS_AS_AREA_CREATE, (sysarg_t ) address,
(sysarg_t) size, (sysarg_t) flags);
(sysarg_t) size, (sysarg_t) flags);
}
 
/** Resize address space area.
69,8 → 69,8
*/
int as_area_resize(void *address, size_t size, int flags)
{
return __SYSCALL3(SYS_AS_AREA_RESIZE, (sysarg_t ) address, (sysarg_t)
size, (sysarg_t) flags);
return __SYSCALL3(SYS_AS_AREA_RESIZE, (sysarg_t ) address,
(sysarg_t) size, (sysarg_t) flags);
}
 
/** Destroy address space area.
143,7 → 143,6
/** Return a pointer to some unmapped area where a new as_area fits
*
* @param sz Requested size of the allocation.
* @param color Requested virtual color of the allocation.
*
* @return Pointer to the beginning
*
150,7 → 149,7
* TODO: implement some first_fit/... algorithm; we currently just increment
* the pointer to the last area
*/
void *as_get_mappable_page(size_t sz, int color)
void *as_get_mappable_page(size_t sz)
{
void *res;
uint64_t asz;
166,21 → 165,16
set_maxheapsize(MAX_HEAP_SIZE);
/*
* Make sure we allocate from naturally aligned address and a page of
* appropriate color.
* Make sure we allocate from naturally aligned address.
*/
i = 0;
do {
if (!last_allocated) {
last_allocated = (void *) ALIGN_UP((void *) &_heap +
maxheapsize, asz);
} else {
last_allocated = (void *) ALIGN_UP(((uintptr_t)
last_allocated) + (int) (i > 0), asz);
}
} while ((asz < (1 << (PAGE_COLOR_BITS + PAGE_WIDTH))) &&
(PAGE_COLOR((uintptr_t) last_allocated) != color) &&
(++i < (1 << PAGE_COLOR_BITS)));
if (!last_allocated) {
last_allocated = (void *) ALIGN_UP((void *) &_heap +
maxheapsize, asz);
} else {
last_allocated = (void *) ALIGN_UP(((uintptr_t)
last_allocated) + (int) (i > 0), asz);
}
 
res = last_allocated;
last_allocated += ALIGN_UP(sz, PAGE_SIZE);
/trunk/uspace/libc/generic/mman.c
36,10 → 36,11
#include <as.h>
#include <unistd.h>
 
void *mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset)
void *mmap(void *start, size_t length, int prot, int flags, int fd,
off_t offset)
{
if (!start)
start = as_get_mappable_page(length, 0);
start = as_get_mappable_page(length);
// if (! ((flags & MAP_SHARED) ^ (flags & MAP_PRIVATE)))
// return MAP_FAILED;
/trunk/uspace/libc/arch/sparc64/_link.ld.in
7,9 → 7,9
}
 
SECTIONS {
. = 0x2000;
. = 0x4000;
 
.init ALIGN(0x2000) : SUBALIGN(0x2000) {
.init ALIGN(0x4000) : SUBALIGN(0x4000) {
*(.init);
} :text
.text : {
17,11 → 17,11
*(.rodata*);
} :text
.got ALIGN(0x2000) : SUBALIGN(0x2000) {
.got ALIGN(0x4000) : SUBALIGN(0x4000) {
_gp = .;
*(.got*);
} :data
.data ALIGN(0x2000) : SUBALIGN(0x2000) {
.data ALIGN(0x4000) : SUBALIGN(0x4000) {
*(.data);
*(.sdata);
} :data
41,7 → 41,7
*(.bss);
} :data
 
. = ALIGN(0x2000);
. = ALIGN(0x4000);
_heap = .;
/DISCARD/ : {
/trunk/uspace/libc/arch/sparc64/include/config.h
35,9 → 35,9
#ifndef LIBC_sparc64_CONFIG_H_
#define LIBC_sparc64_CONFIG_H_
 
#define PAGE_WIDTH 13
#define PAGE_SIZE (1<<PAGE_WIDTH)
#define PAGE_COLOR_BITS 1 /**< Bit 13 is the page color. */
#define PAGE_WIDTH 14
#define PAGE_SIZE (1 << PAGE_WIDTH)
#define PAGE_COLOR_BITS 0 /**< Only one page color. */
 
#endif
 
/trunk/uspace/libc/arch/sparc64/include/stack.h
43,7 → 43,7
/**
* 16-extended-word save area for %i[0-7] and %l[0-7] registers.
*/
#define STACK_WINDOW_SAVE_AREA_SIZE (16*STACK_ITEM_SIZE)
#define STACK_WINDOW_SAVE_AREA_SIZE (16 * STACK_ITEM_SIZE)
 
/**
* By convention, the actual top of the stack is %sp + STACK_BIAS.
/trunk/boot/arch/sparc64/loader/asm.h
33,8 → 33,8
#include "types.h"
#include "main.h"
 
#define PAGE_SIZE 8192
#define PAGE_WIDTH 13
#define PAGE_WIDTH 14
#define PAGE_SIZE (1 << PAGE_WIDTH)
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))