Subversion Repositories HelenOS

Compare Revisions

Rev 2014 → Rev 2015

/trunk/kernel/genarch/src/fb/fb.c
45,9 → 45,12
#include <config.h>
#include <bitops.h>
#include <print.h>
#include <ddi/ddi.h>
 
#include "helenos.xbm"
 
static parea_t fb_parea; /**< Physical memory area for fb. */
 
SPINLOCK_INITIALIZE(fb_lock);
 
static uint8_t *fbaddress = NULL;
434,6 → 437,12
rows = y / FONT_SCANLINES;
columns = x / COL_WIDTH;
 
fb_parea.pbase = (uintptr_t) addr;
fb_parea.vbase = (uintptr_t) fbaddress;
fb_parea.frames = SIZE2FRAMES(fbsize);
fb_parea.cacheable = false;
ddi_parea_register(&fb_parea);
 
sysinfo_set_item_val("fb", NULL, true);
sysinfo_set_item_val("fb.kind", NULL, 1);
sysinfo_set_item_val("fb.width", NULL, xres);
441,6 → 450,8
sysinfo_set_item_val("fb.scanline", NULL, scan);
sysinfo_set_item_val("fb.visual", NULL, visual);
sysinfo_set_item_val("fb.address.physical", NULL, addr);
sysinfo_set_item_val("fb.address.color", NULL, PAGE_COLOR((uintptr_t)
fbaddress));
sysinfo_set_item_val("fb.invert-colors", NULL, invert_colors);
 
/* Allocate double buffer */
/trunk/kernel/generic/include/ddi/ddi.h
39,8 → 39,19
#include <arch/types.h>
#include <typedefs.h>
 
/** Structure representing contiguous physical memory area. */
typedef struct {
uintptr_t pbase; /**< Physical base of the area. */
uintptr_t vbase; /**< Virtual base of the area. */
count_t frames; /**< Number of frames in the area. */
bool cacheable; /**< Cacheability. */
} parea_t;
 
extern void ddi_init(void);
extern void ddi_parea_register(parea_t *parea);
 
extern unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base,
unative_t pages, unative_t flags);
extern unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg);
extern unative_t sys_preempt_control(int enable);
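The new interface establishes a simple pattern that the drivers in this changeset all follow: fill in a parea_t describing the device's physical memory, then register it once at initialization. A minimal sketch with a hypothetical device (MYDEV_PHYS_BASE, MYDEV_SIZE and mydev_init() are illustrative, not part of this changeset):

#include <ddi/ddi.h>
#include <mm/frame.h>

#define MYDEV_PHYS_BASE 0x1f000000UL /* hypothetical register block */
#define MYDEV_SIZE 16384

static parea_t mydev_parea;

void mydev_init(uintptr_t kernel_vbase)
{
	mydev_parea.pbase = (uintptr_t) MYDEV_PHYS_BASE;
	mydev_parea.vbase = kernel_vbase; /* where the kernel mapped it */
	mydev_parea.frames = SIZE2FRAMES(MYDEV_SIZE);
	mydev_parea.cacheable = false; /* device registers, not RAM */
	ddi_parea_register(&mydev_parea); /* physmem_map() now permitted */
}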
 
/trunk/kernel/generic/include/mm/page.h
70,7 → 70,7
/**
* Macro for computing page color.
*/
#define PAGE_COLOR(va) (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1))
 
/** Page fault access type. */
enum pf_access {
82,7 → 82,8
 
/** Operations to manipulate page mappings. */
struct page_mapping_operations {
void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame, int
flags);
void (* mapping_remove)(as_t *as, uintptr_t page);
pte_t *(* mapping_find)(as_t *as, uintptr_t page);
};
93,7 → 94,8
extern void page_init(void);
extern void page_table_lock(as_t *as, bool lock);
extern void page_table_unlock(as_t *as, bool unlock);
extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int
flags);
extern void page_mapping_remove(as_t *as, uintptr_t page);
extern pte_t *page_mapping_find(as_t *as, uintptr_t page);
extern pte_t *page_table_create(int flags);
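A quick worked example of PAGE_COLOR, assuming the sparc64 values PAGE_WIDTH = 13 (8 KiB pages) and PAGE_COLOR_BITS = 1 (assumptions; check the architecture headers): the color is then simply bit 13 of the virtual address.

/* Assumed: PAGE_WIDTH 13, PAGE_COLOR_BITS 1 (sparc64). */
/*
 * PAGE_COLOR(0x40000000) == 0 (0x40000000 >> 13 is even)
 * PAGE_COLOR(0x40002000) == 1 (0x40002000 >> 13 is odd)
 *
 * Mapping one physical frame at both of these virtual addresses would
 * create an illegal alias in a virtually indexed D-cache: the same
 * datum could live in two cache lines at once. The checks added in
 * ddi.c and as.c below refuse such requests with ENOTSUP.
 */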
/trunk/kernel/generic/include/mm/as.h
94,11 → 94,6
/** Address space identifier. Constant on architectures that do not support ASIDs. */
asid_t asid;
#ifdef CONFIG_VIRT_IDX_DCACHE
bool dcache_flush_on_install;
bool dcache_flush_on_deinstall;
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/** Architecture specific content. */
as_arch_t arch;
};
165,12 → 160,6
 
/** Data to be used by the backend. */
mem_backend_data_t backend_data;
 
/**
* Virtual color of the original address space area that was at the beginning
* of the share chain.
*/
int orig_color;
};
 
extern as_t *AS_KERNEL;
/trunk/kernel/generic/src/main/main.c
80,6 → 80,7
#include <adt/btree.h>
#include <console/klog.h>
#include <smp/smp.h>
#include <ddi/ddi.h>
 
/** Global configuration structure. */
config_t config;
102,12 → 103,15
* the linker or the low level assembler code with
* appropriate sizes and addresses.
*/
uintptr_t hardcoded_load_address = 0; /**< Virtual address of where the kernel is loaded. */
size_t hardcoded_ktext_size = 0; /**< Size of the kernel code in bytes. */
size_t hardcoded_kdata_size = 0; /**< Size of the kernel data in bytes. */
uintptr_t stack_safe = 0; /**< Lowest safe stack virtual address. */
 
void main_bsp(void);
void main_ap(void);
 
141,7 → 145,8
config.base = hardcoded_load_address;
config.memory_size = get_memory_size();
config.kernel_size = ALIGN_UP(hardcoded_ktext_size +
hardcoded_kdata_size, PAGE_SIZE);
config.stack_size = CONFIG_STACK_SIZE;
/* Initially the stack is placed just after the kernel */
150,14 → 155,18
/* Avoid placing stack on top of init */
count_t i;
for (i = 0; i < init.cnt; i++) {
if (PA_overlaps(config.stack_base, config.stack_size,
init.tasks[i].addr, init.tasks[i].size))
config.stack_base = ALIGN_UP(init.tasks[i].addr +
init.tasks[i].size, config.stack_size);
}
 
/* Avoid placing stack on top of boot allocations. */
if (ballocs.size) {
if (PA_overlaps(config.stack_base, config.stack_size,
ballocs.base, ballocs.size))
config.stack_base = ALIGN_UP(ballocs.base +
ballocs.size, PAGE_SIZE);
}
if (config.stack_base < stack_safe)
164,7 → 173,8
config.stack_base = ALIGN_UP(stack_safe, PAGE_SIZE);
context_save(&ctx);
context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base,
THREAD_STACK_SIZE);
context_restore(&ctx);
/* not reached */
}
200,22 → 210,28
* Memory management subsystems initialization.
*/
arch_pre_mm_init();
frame_init();
/* Initialize at least 1 memory segment big enough for slab to work. */
slab_cache_init();
btree_init();
as_init();
page_init();
tlb_init();
ddi_init();
arch_post_mm_init();
 
version_print();
printf("kernel: %.*p hardcoded_ktext_size=%zdK, "
"hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2,
config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >>
10);
printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2,
config.stack_base, config.stack_size >> 10);
 
arch_pre_smp_init();
smp_init();
/* Slab must be initialized after we know the number of processors. */
slab_enable_cpucache();
 
printf("config.memory_size=%zdM\n", config.memory_size >> 20);
printf("config.cpu_count=%zd\n", config.cpu_count);
232,7 → 248,9
if (init.cnt > 0) {
for (i = 0; i < init.cnt; i++)
printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i,
sizeof(uintptr_t) * 2, init.tasks[i].addr, i,
init.tasks[i].size);
} else
printf("No init binaries found\n");
304,7 → 322,8
* collide with another CPU coming up. To prevent this, we
* switch to this cpu's private stack prior to waking kmp up.
*/
context_set(&CPU->saved_context, FADDR(main_ap_separated_stack),
(uintptr_t) CPU->stack, CPU_STACK_SIZE);
context_restore(&CPU->saved_context);
/* not reached */
}
/trunk/kernel/generic/src/sysinfo/sysinfo.c
230,21 → 230,24
printf(" ");
switch (root->val_type) {
case SYSINFO_VAL_UNDEFINED:
val = 0;
vtype = "UND";
break;
case SYSINFO_VAL_VAL:
val = root->val.val;
vtype = "VAL";
break;
case SYSINFO_VAL_FUNCTION:
val = ((sysinfo_val_fn_t) (root->val.fn)) (root);
vtype = "FUN";
break;
}
printf("%s %s val:%d(%x) sub:%s\n", root->name, vtype, val,
val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ?
"NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ?
"TAB" : "FUN"));
if (root->subinfo_type == SYSINFO_SUBINFO_TABLE)
sysinfo_dump(&(root -> subinfo.table), depth + 1);
/trunk/kernel/generic/src/time/clock.c
54,7 → 54,12
#include <proc/thread.h>
#include <sysinfo/sysinfo.h>
#include <arch/barrier.h>
#include <mm/frame.h>
#include <ddi/ddi.h>
 
/** Physical memory area of the real time clock. */
static parea_t clock_parea;
 
/* Pointers to public variables with time */
struct ptime {
unative_t seconds1;
72,18 → 77,16
* The applications (and sometimes kernel) need to access accurate
* information about realtime data. We allocate 1 page with these
* data and update it periodically.
*
*/
void clock_counter_init(void)
{
void *faddr;
 
faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
if (!faddr)
panic("Cannot allocate page for clock");
public_time = (struct ptime *) PA2KA(faddr);
 
/* TODO: We would need some arch dependent settings here */
public_time->seconds1 = 0;
90,7 → 93,20
public_time->seconds2 = 0;
public_time->useconds = 0;
 
clock_parea.pbase = (uintptr_t) faddr;
clock_parea.vbase = (uintptr_t) public_time;
clock_parea.frames = 1;
clock_parea.cacheable = true;
ddi_parea_register(&clock_parea);
 
/*
* Prepare information for the userspace so that it can successfully
* physmem_map() the clock_parea.
*/
sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
sysinfo_set_item_val("clock.fcolor", NULL, (unative_t)
PAGE_COLOR(clock_parea.vbase));
sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}
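The items published above are exactly what a userspace task needs in order to physmem_map() the clock page itself. A rough sketch of that consumer side; sysinfo_value(), the physmem_map() prototype, and the struct layout are assumptions for illustration, not the actual libc interface:

/* Hypothetical userspace helpers, for illustration only. */
extern unative_t sysinfo_value(const char *name);
extern int physmem_map(void *phys, void *virt, unsigned long pages, int flags);

struct ptime { /* mirrors the kernel structure above */
	unative_t seconds1;
	/* ... remaining fields as in the kernel's clock.c ... */
};

struct ptime *map_clock(void *virt)
{
	void *faddr = (void *) sysinfo_value("clock.faddr");
	unative_t fcolor = sysinfo_value("clock.fcolor");

	/*
	 * On CONFIG_VIRT_IDX_DCACHE architectures the chosen virtual
	 * address must have the color published in clock.fcolor
	 * (PAGE_COLOR as in mm/page.h), or sys_physmem_map() will
	 * refuse the mapping with ENOTSUP.
	 */
	if (PAGE_COLOR((uintptr_t) virt) != fcolor)
		return NULL;

	/* clock_parea is cacheable, so the flags must say so too. */
	if (physmem_map(faddr, virt, 1, AS_AREA_READ | AS_AREA_CACHEABLE) != 0)
		return NULL;

	return (struct ptime *) virt;
}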
 
 
/trunk/kernel/generic/src/ddi/ddi.c
47,20 → 47,60
#include <mm/as.h>
#include <synch/spinlock.h>
#include <syscall/copy.h>
#include <adt/btree.h>
#include <arch.h>
#include <align.h>
#include <errno.h>
 
/** This lock protects the parea_btree. */
SPINLOCK_INITIALIZE(parea_lock);
 
/** B+tree with enabled physical memory areas. */
static btree_t parea_btree;
 
/** Initialize DDI. */
void ddi_init(void)
{
btree_create(&parea_btree);
}
 
/** Enable piece of physical memory for mapping by physmem_map().
*
* @param parea Pointer to physical area structure.
*
* @todo This function doesn't check for overlaps. It relies on the kernel to
* create disjoint physical memory areas.
*/
void ddi_parea_register(parea_t *parea)
{
ipl_t ipl;
 
ipl = interrupts_disable();
spinlock_lock(&parea_lock);
/*
* TODO: we should really check for overlaps here.
* However, we should be safe because the kernel is pretty sane and
* memory of different devices doesn't overlap.
*/
btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL);
 
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
}
 
/** Map piece of physical memory into virtual address space of current task.
*
* @param pf Physical address of the starting frame.
* @param vp Virtual address of the starting page.
* @param pages Number of pages to map.
* @param flags Address space area flags for the mapping.
*
* @return 0 on success, EPERM if the caller lacks capabilities to use this
* syscall, ENOENT if there is no task matching the specified ID or the
* physical address space is not enabled for mapping, and ENOMEM if there
* was a problem in creating the address space area. ENOTSUP is returned
* when an attempt to create an illegal address alias is detected.
*/
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
{
79,6 → 119,40
return EPERM;
 
ipl = interrupts_disable();
 
/*
* Check if the physical memory area is enabled for mapping.
* If the architecture supports virtually indexed caches, intercept
* attempts to create an illegal address alias.
*/
spinlock_lock(&parea_lock);
parea_t *parea;
btree_node_t *nodep;
parea = btree_search(&parea_btree, (btree_key_t) pf, &nodep);
if (!parea || parea->frames < pages || ((flags & AS_AREA_CACHEABLE) &&
!parea->cacheable) || (!(flags & AS_AREA_CACHEABLE) &&
parea->cacheable)) {
/*
* This physical memory area cannot be mapped.
*/
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
return ENOENT;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) {
/*
* Refuse to create an illegal address alias.
*/
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
return ENOTSUP;
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
spinlock_unlock(&parea_lock);
 
spinlock_lock(&TASK->lock);
if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE,
107,8 → 181,8
* @param ioaddr Starting I/O address.
* @param size Size of the enabled I/O space.
*
* @return 0 on success, EPERM if the caller lacks capabilities to use this
* syscall, ENOENT if there is no task matching the specified ID.
*/
static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size)
{
160,12 → 234,12
*
* @return 0 on success, otherwise it returns error code found in errno.h
*/
unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t
pages, unative_t flags)
{
return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base,
FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE),
(count_t) pages, (int) flags);
}
 
/** Wrapper for SYS_ENABLE_IOSPACE syscall.
183,14 → 257,15
if (rc != 0)
return (unative_t) rc;
return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id,
(uintptr_t) arg.ioaddr, (size_t) arg.size);
}
 
/** Disable or enable preemption.
*
* @param enable If non-zero, the preemption counter will be decremented,
* leading to potential enabling of preemption. Otherwise the preemption
* counter will be incremented, preventing preemption from occurring.
*
* @return Zero on success or EPERM if the caller's capabilities are not sufficient.
*/
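For clarity, the admission logic that ddi_physmem_map() now applies can be restated as one standalone predicate. This is only a sketch: the real code above distinguishes ENOENT (unregistered or undersized area, cacheability mismatch) from ENOTSUP (color mismatch), and parea_admits() is not an actual kernel function.

/* Sketch: may a request (vp, pages, flags) map the area described by parea? */
static bool parea_admits(parea_t *parea, uintptr_t vp, count_t pages, int flags)
{
	if (!parea || parea->frames < pages)
		return false; /* not registered, or request too large */
	if (((flags & AS_AREA_CACHEABLE) != 0) != parea->cacheable)
		return false; /* cacheability of mapping must match the area */
#ifdef CONFIG_VIRT_IDX_DCACHE
	if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp))
		return false; /* would create an illegal address alias */
#endif
	return true;
}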
/trunk/kernel/generic/src/console/klog.c
38,13 → 38,17
#include <print.h>
#include <ddi/device.h>
#include <ddi/irq.h>
#include <ddi/ddi.h>
#include <ipc/irq.h>
 
/** Physical memory area used for klog. */
static parea_t klog_parea;
/*
* For now, we use 0 as INR.
* However, on some architectures 0 is the clock interrupt (e.g. amd64 and
* ia32). It is therefore desirable to have architecture specific definition of
* KLOG_VIRT_INR in the future.
*/
#define KLOG_VIRT_INR 0
 
75,11 → 79,19
faddr = frame_alloc(KLOG_ORDER, FRAME_ATOMIC);
if (!faddr)
panic("Cannot allocate page for klog");
klog = (char *) PA2KA(faddr);
devno_t devno = device_assign_devno();
klog_parea.pbase = (uintptr_t) faddr;
klog_parea.vbase = (uintptr_t) klog;
klog_parea.frames = 1 << KLOG_ORDER;
klog_parea.cacheable = true;
ddi_parea_register(&klog_parea);
 
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
sysinfo_set_item_val("klog.fcolor", NULL, (unative_t)
PAGE_COLOR((uintptr_t) klog));
sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER);
sysinfo_set_item_val("klog.devno", NULL, devno);
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR);
/trunk/kernel/generic/src/lib/rd.c
41,11 → 41,15
#include <arch/byteorder.h>
#include <mm/frame.h>
#include <sysinfo/sysinfo.h>
#include <ddi/ddi.h>
 
static parea_t rd_parea; /**< Physical memory area for rd. */
 
int init_rd(rd_header * header, size_t size)
{
/* Identify RAM disk */
if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) ||
(header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3))
return RE_INVALID;
/* Identify version */
76,9 → 80,18
if ((uint64_t) hsize + dsize > size)
dsize = size - hsize;
rd_parea.pbase = KA2PA((void *) header + hsize);
rd_parea.vbase = (uintptr_t) ((void *) header + hsize);
rd_parea.frames = SIZE2FRAMES(dsize);
rd_parea.cacheable = true;
ddi_parea_register(&rd_parea);
 
sysinfo_set_item_val("rd", NULL, true);
sysinfo_set_item_val("rd.size", NULL, dsize);
sysinfo_set_item_val("rd.address.physical", NULL, (unative_t)
KA2PA((void *) header + hsize));
sysinfo_set_item_val("rd.address.color", NULL, (unative_t)
PAGE_COLOR((uintptr_t) header + hsize));
 
return RE_OK;
}
/trunk/kernel/generic/src/mm/as.c
166,11 → 166,6
as->cpu_refcount = 0;
as->page_table = page_table_create(flags);
 
#ifdef CONFIG_VIRT_IDX_DCACHE
as->dcache_flush_on_install = false;
as->dcache_flush_on_deinstall = false;
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
return as;
}
 
278,18 → 273,6
else
memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0);
 
#ifdef CONFIG_VIRT_IDX_DCACHE
/*
* When the area is being created with the AS_AREA_ATTR_PARTIAL flag, the
* orig_color is probably wrong until the flag is reset. In other words, it is
* initialized with the color of the area being created and not with the color
* of the original address space area at the beginning of the share chain. Of
* course, the correct color is set by as_area_share() before the flag is
* reset.
*/
a->orig_color = PAGE_COLOR(base);
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
btree_create(&a->used_space);
btree_insert(&as->as_area_btree, base, (void *) a, NULL);
575,7 → 558,8
* such address space area, EPERM if there was a problem in accepting the area
* or ENOMEM if there was a problem in allocating destination address space
* area. ENOTSUP is returned if the address space area backend does not support
* sharing or if the kernel detects an attempt to create an illegal address
* alias.
*/
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
583,7 → 567,6
ipl_t ipl;
int src_flags;
size_t src_size;
int src_orig_color;
as_area_t *src_area, *dst_area;
share_info_t *sh_info;
mem_backend_t *src_backend;
600,7 → 583,6
interrupts_restore(ipl);
return ENOENT;
}
 
if (!src_area->backend || !src_area->backend->share) {
/*
617,7 → 599,6
src_flags = src_area->flags;
src_backend = src_area->backend;
src_backend_data = src_area->backend_data;
src_orig_color = src_area->orig_color;
 
/* Share the cacheable flag from the original mapping */
if (src_flags & AS_AREA_CACHEABLE)
630,6 → 611,20
return EPERM;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (!(dst_flags_mask & AS_AREA_EXEC)) {
if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
/*
* Refuse to create an illegal address alias.
*/
mutex_unlock(&src_area->lock);
mutex_unlock(&src_as->lock);
interrupts_restore(ipl);
return ENOTSUP;
}
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/*
* Now we are committed to sharing the area.
* First, prepare the area for sharing.
682,26 → 677,6
mutex_lock(&dst_area->lock);
dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL;
dst_area->sh_info = sh_info;
dst_area->orig_color = src_orig_color;
#ifdef CONFIG_VIRT_IDX_DCACHE
if (src_orig_color != PAGE_COLOR(dst_base)) {
/*
* We have just detected an attempt to create an invalid address
* alias. We allow this and set a special flag that tells the
* architecture specific code to flush the D-cache when the
* offending address space is installed and deinstalled
* (cleanup).
*
* In order for the flags to take effect immediately, we also
* perform a global D-cache shootdown.
*/
dcache_shootdown_start();
dst_as->dcache_flush_on_install = true;
dst_as->dcache_flush_on_deinstall = true;
dcache_flush();
dcache_shootdown_finalize();
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
mutex_unlock(&dst_area->lock);
mutex_unlock(&dst_as->lock);
 
/trunk/kernel/arch/sparc64/include/interrupt.h
47,8 → 47,7
#define VECTOR_TLB_SHOOTDOWN_IPI 0
 
enum {
IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI
};
 
struct istate {
/trunk/kernel/arch/sparc64/include/mm/cache.h
35,16 → 35,6
#ifndef KERN_sparc64_CACHE_H_
#define KERN_sparc64_CACHE_H_
 
#ifdef CONFIG_SMP
extern void dcache_shootdown_start(void);
extern void dcache_shootdown_finalize(void);
extern void dcache_shootdown_ipi_recv(void);
#else /* CONFIG_SMP */
#define dcache_shootdown_start();
#define dcache_shootdown_finalize();
#define dcache_shootdown_ipi_recv();
#endif /* CONFIG_SMP */
 
extern void dcache_flush(void);
 
#endif
/trunk/kernel/arch/sparc64/src/smp/ipi.c
38,7 → 38,6
#include <arch/asm.h>
#include <config.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <arch/interrupt.h>
#include <arch/trap/interrupt.h>
#include <arch/barrier.h>
121,9 → 120,6
case IPI_TLB_SHOOTDOWN:
func = tlb_shootdown_ipi_recv;
break;
case IPI_DCACHE_SHOOTDOWN:
func = dcache_shootdown_ipi_recv;
break;
default:
panic("Unknown IPI (%d).\n", ipi);
break;
/trunk/kernel/arch/sparc64/src/trap/interrupt.c
44,7 → 44,6
#include <print.h>
#include <arch.h>
#include <mm/tlb.h>
#include <arch/mm/cache.h>
#include <config.h>
#include <synch/spinlock.h>
 
91,8 → 90,6
#ifdef CONFIG_SMP
if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
tlb_shootdown_ipi_recv();
} else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) {
dcache_shootdown_ipi_recv();
}
#endif
} else {
/trunk/kernel/arch/sparc64/src/mm/as.c
49,10 → 49,6
#include <macros.h>
#endif /* CONFIG_TSB */
 
#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/** Architecture dependent address space init. */
void as_arch_init(void)
{
162,23 → 158,6
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
dtsb_base_write(tsb_base.value);
#endif
#ifdef CONFIG_VIRT_IDX_DCACHE
if (as->dcache_flush_on_install) {
/*
* Some mappings in this address space are illegal address
* aliases. Upon their creation, the dcache_flush_on_install
* flag was set.
*
* We are now obliged to flush the D-cache in order to guarantee
* that there will be at most one cache line for each address
* alias.
*
* This flush performs a cleanup after another address space in
* which the alias might have existed.
*/
dcache_flush();
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
}
 
/** Perform sparc64-specific tasks when an address space is removed from the processor.
213,26 → 192,6
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb);
}
#endif
#ifdef CONFIG_VIRT_IDX_DCACHE
if (as->dcache_flush_on_deinstall) {
/*
* Some mappings in this address space are illegal address
* aliases. Upon their creation, the dcache_flush_on_deinstall
* flag was set.
*
* We are now obliged to flush the D-cache in order to guarantee
* that there will be at most one cache line for each address
* alias.
*
* This flush performs a cleanup after this address space. It is
* necessary because other address spaces that contain the same
* alias are not necessarily aware of the need to carry out the
* cache flush. The only address spaces that are aware of it are
* those that created the illegal alias.
*/
dcache_flush();
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
}
 
/** @}
/trunk/kernel/arch/sparc64/src/mm/cache.c
31,68 → 31,10
*/
/**
* @file
* @brief D-cache shootdown algorithm.
*/
 
#include <arch/mm/cache.h>
 
#ifdef CONFIG_SMP
 
#include <smp/ipi.h>
#include <arch/interrupt.h>
#include <synch/spinlock.h>
#include <arch.h>
#include <debug.h>
 
/**
* This spinlock is used by the processors to synchronize during the D-cache
* shootdown.
*/
SPINLOCK_INITIALIZE(dcachelock);
 
/** Initialize the D-cache shootdown sequence.
*
* Start the shootdown sequence by sending out an IPI and wait until all
* processors spin on the dcachelock spinlock.
*/
void dcache_shootdown_start(void)
{
int i;
 
CPU->arch.dcache_active = 0;
spinlock_lock(&dcachelock);
 
ipi_broadcast(IPI_DCACHE_SHOOTDOWN);
 
busy_wait:
for (i = 0; i < config.cpu_count; i++)
if (cpus[i].arch.dcache_active)
goto busy_wait;
}
 
/** Finish the D-cache shootdown sequence. */
void dcache_shootdown_finalize(void)
{
spinlock_unlock(&dcachelock);
CPU->arch.dcache_active = 1;
}
 
/** Process the D-cache shootdown IPI. */
void dcache_shootdown_ipi_recv(void)
{
ASSERT(CPU);
 
CPU->arch.dcache_active = 0;
spinlock_lock(&dcachelock);
spinlock_unlock(&dcachelock);
dcache_flush();
 
CPU->arch.dcache_active = 1;
}
 
#endif /* CONFIG_SMP */
 
/** @}
*/
 
/trunk/kernel/arch/sparc64/src/mm/page.c
73,8 → 73,9
*/
for (i = 0; i < bsp_locked_dtlb_entries; i++) {
dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
}
#endif
 
151,9 → 152,12
/*
* Second, save the information about the mapping for APs.
*/
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
virtaddr + i*sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
physaddr + i*sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
sizemap[order].pagesize_code;
bsp_locked_dtlb_entries++;
#endif
}
/trunk/kernel/arch/ia32/src/drivers/ega.c
46,6 → 46,7
#include <console/chardev.h>
#include <console/console.h>
#include <sysinfo/sysinfo.h>
#include <ddi/ddi.h>
 
/*
* The EGA driver.
52,6 → 53,8
* Simple and short. Function for displaying characters and "scrolling".
*/
 
static parea_t ega_parea; /**< Physical memory area for EGA video RAM. */
 
SPINLOCK_INITIALIZE(egalock);
static uint32_t ega_cursor;
static uint8_t *videoram;
79,11 → 82,19
chardev_initialize("ega_out", &ega_console, &ega_ops);
stdout = &ega_console;
ega_parea.pbase = VIDEORAM;
ega_parea.vbase = (uintptr_t) videoram;
ega_parea.frames = 1;
ega_parea.cacheable = false;
ddi_parea_register(&ega_parea);
 
sysinfo_set_item_val("fb", NULL, true);
sysinfo_set_item_val("fb.kind", NULL, 2);
sysinfo_set_item_val("fb.width", NULL, ROW);
sysinfo_set_item_val("fb.height", NULL, ROWS);
sysinfo_set_item_val("fb.address.physical", NULL, VIDEORAM);
sysinfo_set_item_val("fb.address.color", NULL, PAGE_COLOR((uintptr_t)
videoram));
#ifndef CONFIG_FB
putchar('\n');