/trunk/kernel/genarch/src/fb/fb.c |
---|
45,9 → 45,12 |
#include <config.h> |
#include <bitops.h> |
#include <print.h> |
#include <ddi/ddi.h> |
#include "helenos.xbm" |
static parea_t fb_parea; /**< Physical memory area for fb. */ |
SPINLOCK_INITIALIZE(fb_lock); |
static uint8_t *fbaddress = NULL; |
434,6 → 437,12 |
rows = y / FONT_SCANLINES; |
columns = x / COL_WIDTH; |
fb_parea.pbase = (uintptr_t) addr; |
fb_parea.vbase = (uintptr_t) fbaddress; |
fb_parea.frames = SIZE2FRAMES(fbsize); |
fb_parea.cacheable = false; |
ddi_parea_register(&fb_parea); |
sysinfo_set_item_val("fb", NULL, true); |
sysinfo_set_item_val("fb.kind", NULL, 1); |
sysinfo_set_item_val("fb.width", NULL, xres); |
441,6 → 450,8 |
sysinfo_set_item_val("fb.scanline", NULL, scan); |
sysinfo_set_item_val("fb.visual", NULL, visual); |
sysinfo_set_item_val("fb.address.physical", NULL, addr); |
sysinfo_set_item_val("fb.address.color", NULL, PAGE_COLOR((uintptr_t) |
fbaddress)); |
sysinfo_set_item_val("fb.invert-colors", NULL, invert_colors); |
/* Allocate double buffer */ |
/trunk/kernel/generic/include/ddi/ddi.h |
---|
39,8 → 39,19 |
#include <arch/types.h> |
#include <typedefs.h> |
unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t pages, |
unative_t flags); |
/** Structure representing contiguous physical memory area. */ |
typedef struct { |
uintptr_t pbase; /**< Physical base of the area. */ |
uintptr_t vbase; /**< Virtual base of the area. */ |
count_t frames; /**< Number of frames in the area. */ |
bool cacheable; /**< Cacheability. */ |
} parea_t; |
extern void ddi_init(void); |
extern void ddi_parea_register(parea_t *parea); |
extern unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, |
unative_t pages, unative_t flags); |
extern unative_t sys_iospace_enable(ddi_ioarg_t *uspace_io_arg); |
extern unative_t sys_preempt_control(int enable); |
/trunk/kernel/generic/include/mm/page.h |
---|
70,7 → 70,7 |
/** |
* Macro for computing page color. |
*/ |
#define PAGE_COLOR(va) (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1)) |
#define PAGE_COLOR(va) (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1)) |
/** Page fault access type. */ |
enum pf_access { |
82,7 → 82,8 |
/** Operations to manipulate page mappings. */ |
struct page_mapping_operations { |
void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame, int flags); |
void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame, int |
flags); |
void (* mapping_remove)(as_t *as, uintptr_t page); |
pte_t *(* mapping_find)(as_t *as, uintptr_t page); |
}; |
93,7 → 94,8 |
extern void page_init(void); |
extern void page_table_lock(as_t *as, bool lock); |
extern void page_table_unlock(as_t *as, bool unlock); |
extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags); |
extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int |
flags); |
extern void page_mapping_remove(as_t *as, uintptr_t page); |
extern pte_t *page_mapping_find(as_t *as, uintptr_t page); |
extern pte_t *page_table_create(int flags); |
/trunk/kernel/generic/include/mm/as.h |
---|
94,11 → 94,6 |
/** Address space identifier. Constant on architectures that do not support ASIDs.*/ |
asid_t asid; |
#ifdef CONFIG_VIRT_IDX_DCACHE |
bool dcache_flush_on_install; |
bool dcache_flush_on_deinstall; |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
/** Architecture specific content. */ |
as_arch_t arch; |
}; |
165,12 → 160,6 |
/** Data to be used by the backend. */ |
mem_backend_data_t backend_data; |
/** |
* Virtual color of the original address space area that was at the beginning |
* of the share chain. |
*/ |
int orig_color; |
}; |
extern as_t *AS_KERNEL; |
/trunk/kernel/generic/src/main/main.c |
---|
80,6 → 80,7 |
#include <adt/btree.h> |
#include <console/klog.h> |
#include <smp/smp.h> |
#include <ddi/ddi.h> |
/** Global configuration structure. */ |
config_t config; |
102,12 → 103,15 |
* the linker or the low level assembler code with |
* appropriate sizes and addresses. |
*/ |
uintptr_t hardcoded_load_address = 0; /**< Virtual address of where the kernel is loaded. */ |
size_t hardcoded_ktext_size = 0; /**< Size of the kernel code in bytes. */ |
size_t hardcoded_kdata_size = 0; /**< Size of the kernel data in bytes. */ |
uintptr_t hardcoded_load_address = 0; /**< Virtual address of where the kernel |
* is loaded. */ |
size_t hardcoded_ktext_size = 0; /**< Size of the kernel code in bytes. |
*/ |
size_t hardcoded_kdata_size = 0; /**< Size of the kernel data in bytes. |
*/ |
uintptr_t stack_safe = 0; /**< Lowest safe stack virtual address. |
*/ |
uintptr_t stack_safe = 0; /**< Lowest safe stack virtual address */ |
void main_bsp(void); |
void main_ap(void); |
141,7 → 145,8 |
config.base = hardcoded_load_address; |
config.memory_size = get_memory_size(); |
config.kernel_size = ALIGN_UP(hardcoded_ktext_size + hardcoded_kdata_size, PAGE_SIZE); |
config.kernel_size = ALIGN_UP(hardcoded_ktext_size + |
hardcoded_kdata_size, PAGE_SIZE); |
config.stack_size = CONFIG_STACK_SIZE; |
/* Initialy the stack is placed just after the kernel */ |
150,14 → 155,18 |
/* Avoid placing stack on top of init */ |
count_t i; |
for (i = 0; i < init.cnt; i++) { |
if (PA_overlaps(config.stack_base, config.stack_size, init.tasks[i].addr, init.tasks[i].size)) |
config.stack_base = ALIGN_UP(init.tasks[i].addr + init.tasks[i].size, config.stack_size); |
if (PA_overlaps(config.stack_base, config.stack_size, |
init.tasks[i].addr, init.tasks[i].size)) |
config.stack_base = ALIGN_UP(init.tasks[i].addr + |
init.tasks[i].size, config.stack_size); |
} |
/* Avoid placing stack on top of boot allocations. */ |
if (ballocs.size) { |
if (PA_overlaps(config.stack_base, config.stack_size, ballocs.base, ballocs.size)) |
config.stack_base = ALIGN_UP(ballocs.base + ballocs.size, PAGE_SIZE); |
if (PA_overlaps(config.stack_base, config.stack_size, |
ballocs.base, ballocs.size)) |
config.stack_base = ALIGN_UP(ballocs.base + |
ballocs.size, PAGE_SIZE); |
} |
if (config.stack_base < stack_safe) |
164,7 → 173,8 |
config.stack_base = ALIGN_UP(stack_safe, PAGE_SIZE); |
context_save(&ctx); |
context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base, THREAD_STACK_SIZE); |
context_set(&ctx, FADDR(main_bsp_separated_stack), config.stack_base, |
THREAD_STACK_SIZE); |
context_restore(&ctx); |
/* not reached */ |
} |
200,22 → 210,28 |
* Memory management subsystems initialization. |
*/ |
arch_pre_mm_init(); |
frame_init(); /* Initialize at least 1 memory segment big enough for slab to work */ |
frame_init(); |
/* Initialize at least 1 memory segment big enough for slab to work. */ |
slab_cache_init(); |
btree_init(); |
as_init(); |
page_init(); |
tlb_init(); |
ddi_init(); |
arch_post_mm_init(); |
version_print(); |
printf("kernel: %.*p hardcoded_ktext_size=%zdK, hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2, config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >> 10); |
printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2, config.stack_base, config.stack_size >> 10); |
printf("kernel: %.*p hardcoded_ktext_size=%zdK, " |
"hardcoded_kdata_size=%zdK\n", sizeof(uintptr_t) * 2, |
config.base, hardcoded_ktext_size >> 10, hardcoded_kdata_size >> |
10); |
printf("stack: %.*p size=%zdK\n", sizeof(uintptr_t) * 2, |
config.stack_base, config.stack_size >> 10); |
arch_pre_smp_init(); |
smp_init(); |
slab_enable_cpucache(); /* Slab must be initialized AFTER we know the number of processors */ |
/* Slab must be initialized after we know the number of processors. */ |
slab_enable_cpucache(); |
printf("config.memory_size=%zdM\n", config.memory_size >> 20); |
printf("config.cpu_count=%zd\n", config.cpu_count); |
232,7 → 248,9 |
if (init.cnt > 0) { |
for (i = 0; i < init.cnt; i++) |
printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, sizeof(uintptr_t) * 2, init.tasks[i].addr, i, init.tasks[i].size); |
printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i, |
sizeof(uintptr_t) * 2, init.tasks[i].addr, i, |
init.tasks[i].size); |
} else |
printf("No init binaries found\n"); |
304,7 → 322,8 |
* collide with another CPU coming up. To prevent this, we |
* switch to this cpu's private stack prior to waking kmp up. |
*/ |
context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE); |
context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), |
(uintptr_t) CPU->stack, CPU_STACK_SIZE); |
context_restore(&CPU->saved_context); |
/* not reached */ |
} |
/trunk/kernel/generic/src/sysinfo/sysinfo.c |
---|
230,21 → 230,24 |
printf(" "); |
switch (root->val_type) { |
case SYSINFO_VAL_UNDEFINED: |
val = 0; |
vtype = "UND"; |
break; |
case SYSINFO_VAL_VAL: |
val = root->val.val; |
vtype = "VAL"; |
break; |
case SYSINFO_VAL_FUNCTION: |
val = ((sysinfo_val_fn_t) (root->val.fn)) (root); |
vtype = "FUN"; |
break; |
case SYSINFO_VAL_UNDEFINED: |
val = 0; |
vtype = "UND"; |
break; |
case SYSINFO_VAL_VAL: |
val = root->val.val; |
vtype = "VAL"; |
break; |
case SYSINFO_VAL_FUNCTION: |
val = ((sysinfo_val_fn_t) (root->val.fn)) (root); |
vtype = "FUN"; |
break; |
} |
printf("%s %s val:%d(%x) sub:%s\n", root->name, vtype, val, val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ? "NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ? "TAB" : "FUN")); |
printf("%s %s val:%d(%x) sub:%s\n", root->name, vtype, val, |
val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ? |
"NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ? |
"TAB" : "FUN")); |
if (root->subinfo_type == SYSINFO_SUBINFO_TABLE) |
sysinfo_dump(&(root -> subinfo.table), depth + 1); |
/trunk/kernel/generic/src/time/clock.c |
---|
54,7 → 54,12 |
#include <proc/thread.h> |
#include <sysinfo/sysinfo.h> |
#include <arch/barrier.h> |
#include <mm/frame.h> |
#include <ddi/ddi.h> |
/** Physical memory area of the real time clock. */ |
static parea_t clock_parea; |
/* Pointers to public variables with time */ |
struct ptime { |
unative_t seconds1; |
72,18 → 77,16 |
* The applications (and sometimes kernel) need to access accurate |
* information about realtime data. We allocate 1 page with these |
* data and update it periodically. |
* |
* |
*/ |
void clock_counter_init(void) |
{ |
void *faddr; |
faddr = frame_alloc(0, FRAME_ATOMIC); |
faddr = frame_alloc(ONE_FRAME, FRAME_ATOMIC); |
if (!faddr) |
panic("Cannot allocate page for clock"); |
public_time = (struct ptime *)PA2KA(faddr); |
public_time = (struct ptime *) PA2KA(faddr); |
/* TODO: We would need some arch dependent settings here */ |
public_time->seconds1 = 0; |
90,7 → 93,20 |
public_time->seconds2 = 0; |
public_time->useconds = 0; |
sysinfo_set_item_val("clock.faddr", NULL, (unative_t)faddr); |
clock_parea.pbase = (uintptr_t) faddr; |
clock_parea.vbase = (uintptr_t) public_time; |
clock_parea.frames = 1; |
clock_parea.cacheable = true; |
ddi_parea_register(&clock_parea); |
/* |
* Prepare information for the userspace so that it can successfully |
* physmem_map() the clock_parea. |
*/ |
sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true); |
sysinfo_set_item_val("clock.fcolor", NULL, (unative_t) |
PAGE_COLOR(clock_parea.vbase)); |
sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr); |
} |
/trunk/kernel/generic/src/ddi/ddi.c |
---|
47,20 → 47,60 |
#include <mm/as.h> |
#include <synch/spinlock.h> |
#include <syscall/copy.h> |
#include <adt/btree.h> |
#include <arch.h> |
#include <align.h> |
#include <errno.h> |
/** This lock protects the parea_btree. */ |
SPINLOCK_INITIALIZE(parea_lock); |
/** B+tree with enabled physical memory areas. */ |
static btree_t parea_btree; |
/** Initialize DDI.
 *
 * Create the B+tree used to keep track of physical memory areas that
 * have been enabled for mapping via ddi_parea_register().
 */
void ddi_init(void)
{
	btree_create(&parea_btree);
}
/** Enable piece of physical memory for mapping by physmem_map(). |
* |
* @param parea Pointer to physical area structure. |
* |
* @todo This function doesn't check for overlaps. It depends on the kernel to |
* create disjunct physical memory areas. |
*/ |
void ddi_parea_register(parea_t *parea) |
{ |
ipl_t ipl; |
ipl = interrupts_disable(); |
spinlock_lock(&parea_lock); |
/* |
* TODO: we should really check for overlaps here. |
* However, we should be safe because the kernel is pretty sane and |
* memory of different devices doesn't overlap. |
*/ |
btree_insert(&parea_btree, (btree_key_t) parea->pbase, parea, NULL); |
spinlock_unlock(&parea_lock); |
interrupts_restore(ipl); |
} |
/** Map piece of physical memory into virtual address space of current task. |
* |
* @param pf Physical frame address of the starting frame. |
* @param vp Virtual page address of the starting page. |
* @param pf Physical address of the starting frame. |
* @param vp Virtual address of the starting page. |
* @param pages Number of pages to map. |
* @param flags Address space area flags for the mapping. |
* |
* @return 0 on success, EPERM if the caller lacks capabilities to use this syscall, |
* ENOENT if there is no task matching the specified ID and ENOMEM if |
* there was a problem in creating address space area. |
* @return 0 on success, EPERM if the caller lacks capabilities to use this |
* syscall, ENOENT if there is no task matching the specified ID or the |
* physical address space is not enabled for mapping and ENOMEM if there |
* was a problem in creating address space area. ENOTSUP is returned when |
* an attempt to create an illegal address alias is detected. |
*/ |
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags) |
{ |
79,6 → 119,40 |
return EPERM; |
ipl = interrupts_disable(); |
/* |
* Check if the physical memory area is enabled for mapping. |
* If the architecture supports virtually indexed caches, intercept |
* attempts to create an illegal address alias. |
*/ |
spinlock_lock(&parea_lock); |
parea_t *parea; |
btree_node_t *nodep; |
parea = btree_search(&parea_btree, (btree_key_t) pf, &nodep); |
if (!parea || parea->frames < pages || ((flags & AS_AREA_CACHEABLE) && |
!parea->cacheable) || (!(flags & AS_AREA_CACHEABLE) && |
parea->cacheable)) { |
/* |
* This physical memory area cannot be mapped. |
*/ |
spinlock_unlock(&parea_lock); |
interrupts_restore(ipl); |
return ENOENT; |
} |
#ifdef CONFIG_VIRT_IDX_DCACHE |
if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) { |
/* |
* Refuse to create an illegal address alias. |
*/ |
spinlock_unlock(&parea_lock); |
interrupts_restore(ipl); |
return ENOTSUP; |
} |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
spinlock_unlock(&parea_lock); |
spinlock_lock(&TASK->lock); |
if (!as_area_create(TASK->as, flags, pages * PAGE_SIZE, vp, AS_AREA_ATTR_NONE, |
107,8 → 181,8 |
* @param ioaddr Starting I/O address. |
* @param size Size of the enabled I/O space.. |
* |
* @return 0 on success, EPERM if the caller lacks capabilities to use this syscall, |
* ENOENT if there is no task matching the specified ID. |
* @return 0 on success, EPERM if the caller lacks capabilities to use this |
* syscall, ENOENT if there is no task matching the specified ID. |
*/ |
static int ddi_iospace_enable(task_id_t id, uintptr_t ioaddr, size_t size) |
{ |
160,12 → 234,12 |
* |
* @return 0 on success, otherwise it returns error code found in errno.h |
*/ |
unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t pages, |
unative_t flags) |
unative_t sys_physmem_map(unative_t phys_base, unative_t virt_base, unative_t |
pages, unative_t flags) |
{ |
return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base, FRAME_SIZE), |
ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE), (count_t) pages, |
(int) flags); |
return (unative_t) ddi_physmem_map(ALIGN_DOWN((uintptr_t) phys_base, |
FRAME_SIZE), ALIGN_DOWN((uintptr_t) virt_base, PAGE_SIZE), |
(count_t) pages, (int) flags); |
} |
/** Wrapper for SYS_ENABLE_IOSPACE syscall. |
183,14 → 257,15 |
if (rc != 0) |
return (unative_t) rc; |
return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id, (uintptr_t) arg.ioaddr, (size_t) arg.size); |
return (unative_t) ddi_iospace_enable((task_id_t) arg.task_id, |
(uintptr_t) arg.ioaddr, (size_t) arg.size); |
} |
/** Disable or enable preemption. |
* |
* @param enable If non-zero, the preemption counter will be decremented, leading to potential |
* enabling of preemption. Otherwise the preemption counter will be incremented, |
* preventing preemption from occurring. |
* @param enable If non-zero, the preemption counter will be decremented, |
* leading to potential enabling of preemption. Otherwise the preemption |
* counter will be incremented, preventing preemption from occurring. |
* |
* @return Zero on success or EPERM if callers capabilities are not sufficient. |
*/ |
/trunk/kernel/generic/src/console/klog.c |
---|
38,13 → 38,17 |
#include <print.h> |
#include <ddi/device.h> |
#include <ddi/irq.h> |
#include <ddi/ddi.h> |
#include <ipc/irq.h> |
/** Physical memory area used for klog. */ |
static parea_t klog_parea; |
/* |
* For now, we use 0 as INR. |
* However, on some architectures 0 is the clock interrupt (e.g. amd64 and ia32). |
* It is therefore desirable to have architecture specific definition of KLOG_VIRT_INR |
* in the future. |
* However, on some architectures 0 is the clock interrupt (e.g. amd64 and |
* ia32). It is therefore desirable to have architecture specific definition of |
* KLOG_VIRT_INR in the future. |
*/ |
#define KLOG_VIRT_INR 0 |
75,11 → 79,19 |
faddr = frame_alloc(KLOG_ORDER, FRAME_ATOMIC); |
if (!faddr) |
panic("Cannot allocate page for klog"); |
klog = (char *)PA2KA(faddr); |
klog = (char *) PA2KA(faddr); |
devno_t devno = device_assign_devno(); |
sysinfo_set_item_val("klog.faddr", NULL, (unative_t)faddr); |
klog_parea.pbase = (uintptr_t) faddr; |
klog_parea.vbase = (uintptr_t) klog; |
klog_parea.frames = 1 << KLOG_ORDER; |
klog_parea.cacheable = true; |
ddi_parea_register(&klog_parea); |
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr); |
sysinfo_set_item_val("klog.fcolor", NULL, (unative_t) |
PAGE_COLOR((uintptr_t) klog)); |
sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER); |
sysinfo_set_item_val("klog.devno", NULL, devno); |
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR); |
/trunk/kernel/generic/src/lib/rd.c |
---|
41,11 → 41,15 |
#include <arch/byteorder.h> |
#include <mm/frame.h> |
#include <sysinfo/sysinfo.h> |
#include <ddi/ddi.h> |
static parea_t rd_parea; /**< Physical memory area for rd. */ |
int init_rd(rd_header * header, size_t size) |
{ |
/* Identify RAM disk */ |
if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) || (header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3)) |
if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) || |
(header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3)) |
return RE_INVALID; |
/* Identify version */ |
76,9 → 80,18 |
if ((uint64_t) hsize + dsize > size) |
dsize = size - hsize; |
rd_parea.pbase = KA2PA((void *) header + hsize); |
rd_parea.vbase = (uintptr_t) ((void *) header + hsize); |
rd_parea.frames = SIZE2FRAMES(dsize); |
rd_parea.cacheable = true; |
ddi_parea_register(&rd_parea); |
sysinfo_set_item_val("rd", NULL, true); |
sysinfo_set_item_val("rd.size", NULL, dsize); |
sysinfo_set_item_val("rd.address.physical", NULL, (unative_t) KA2PA((void *) header + hsize)); |
sysinfo_set_item_val("rd.address.physical", NULL, (unative_t) |
KA2PA((void *) header + hsize)); |
sysinfo_set_item_val("rd.address.color", NULL, (unative_t) |
PAGE_COLOR((uintptr_t) header + hsize)); |
return RE_OK; |
} |
/trunk/kernel/generic/src/mm/as.c |
---|
166,11 → 166,6 |
as->cpu_refcount = 0; |
as->page_table = page_table_create(flags); |
#ifdef CONFIG_VIRT_IDX_DCACHE |
as->dcache_flush_on_install = false; |
as->dcache_flush_on_deinstall = false; |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
return as; |
} |
278,18 → 273,6 |
else |
memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data), 0); |
#ifdef CONFIG_VIRT_IDX_DCACHE |
/* |
* When the area is being created with the AS_AREA_ATTR_PARTIAL flag, the |
* orig_color is probably wrong until the flag is reset. In other words, it is |
* initialized with the color of the area being created and not with the color |
* of the original address space area at the beginning of the share chain. Of |
* course, the correct color is set by as_area_share() before the flag is |
* reset. |
*/ |
a->orig_color = PAGE_COLOR(base); |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
btree_create(&a->used_space); |
btree_insert(&as->as_area_btree, base, (void *) a, NULL); |
575,7 → 558,8 |
* such address space area, EPERM if there was a problem in accepting the area |
* or ENOMEM if there was a problem in allocating destination address space |
* area. ENOTSUP is returned if the address space area backend does not support |
* sharing. |
* sharing or if the kernel detects an attempt to create an illegal address |
* alias. |
*/ |
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size, |
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask) |
583,7 → 567,6 |
ipl_t ipl; |
int src_flags; |
size_t src_size; |
int src_orig_color; |
as_area_t *src_area, *dst_area; |
share_info_t *sh_info; |
mem_backend_t *src_backend; |
600,7 → 583,6 |
interrupts_restore(ipl); |
return ENOENT; |
} |
if (!src_area->backend || !src_area->backend->share) { |
/* |
617,7 → 599,6 |
src_flags = src_area->flags; |
src_backend = src_area->backend; |
src_backend_data = src_area->backend_data; |
src_orig_color = src_area->orig_color; |
/* Share the cacheable flag from the original mapping */ |
if (src_flags & AS_AREA_CACHEABLE) |
630,6 → 611,20 |
return EPERM; |
} |
#ifdef CONFIG_VIRT_IDX_DCACHE |
if (!(dst_flags_mask & AS_AREA_EXEC)) { |
if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) { |
/* |
* Refuse to create an illegal address alias. |
*/ |
mutex_unlock(&src_area->lock); |
mutex_unlock(&src_as->lock); |
interrupts_restore(ipl); |
return ENOTSUP; |
} |
} |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
/* |
* Now we are committed to sharing the area. |
* First, prepare the area for sharing. |
682,26 → 677,6 |
mutex_lock(&dst_area->lock); |
dst_area->attributes &= ~AS_AREA_ATTR_PARTIAL; |
dst_area->sh_info = sh_info; |
dst_area->orig_color = src_orig_color; |
#ifdef CONFIG_VIRT_IDX_DCACHE |
if (src_orig_color != PAGE_COLOR(dst_base)) { |
/* |
* We have just detected an attempt to create an invalid address |
* alias. We allow this and set a special flag that tells the |
* architecture specific code to flush the D-cache when the |
* offending address space is installed and deinstalled |
* (cleanup). |
* |
* In order for the flags to take effect immediately, we also |
* perform a global D-cache shootdown. |
*/ |
dcache_shootdown_start(); |
dst_as->dcache_flush_on_install = true; |
dst_as->dcache_flush_on_deinstall = true; |
dcache_flush(); |
dcache_shootdown_finalize(); |
} |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
mutex_unlock(&dst_area->lock); |
mutex_unlock(&dst_as->lock); |
/trunk/kernel/arch/sparc64/include/interrupt.h |
---|
47,8 → 47,7 |
#define VECTOR_TLB_SHOOTDOWN_IPI 0 |
enum { |
IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI, |
IPI_DCACHE_SHOOTDOWN |
IPI_TLB_SHOOTDOWN = VECTOR_TLB_SHOOTDOWN_IPI |
}; |
struct istate { |
/trunk/kernel/arch/sparc64/include/mm/cache.h |
---|
35,16 → 35,6 |
#ifndef KERN_sparc64_CACHE_H_ |
#define KERN_sparc64_CACHE_H_ |
#ifdef CONFIG_SMP |
extern void dcache_shootdown_start(void); |
extern void dcache_shootdown_finalize(void); |
extern void dcache_shootdown_ipi_recv(void); |
#else /* CONFIG_SMP */ |
#define dcache_shootdown_start(); |
#define dcache_shootdown_finalize(); |
#define dcache_shootdown_ipi_recv(); |
#endif /* CONFIG_SMP */ |
extern void dcache_flush(void); |
#endif |
/trunk/kernel/arch/sparc64/src/smp/ipi.c |
---|
38,7 → 38,6 |
#include <arch/asm.h> |
#include <config.h> |
#include <mm/tlb.h> |
#include <arch/mm/cache.h> |
#include <arch/interrupt.h> |
#include <arch/trap/interrupt.h> |
#include <arch/barrier.h> |
121,9 → 120,6 |
case IPI_TLB_SHOOTDOWN: |
func = tlb_shootdown_ipi_recv; |
break; |
case IPI_DCACHE_SHOOTDOWN: |
func = dcache_shootdown_ipi_recv; |
break; |
default: |
panic("Unknown IPI (%d).\n", ipi); |
break; |
/trunk/kernel/arch/sparc64/src/trap/interrupt.c |
---|
44,7 → 44,6 |
#include <print.h> |
#include <arch.h> |
#include <mm/tlb.h> |
#include <arch/mm/cache.h> |
#include <config.h> |
#include <synch/spinlock.h> |
91,8 → 90,6 |
#ifdef CONFIG_SMP |
if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) { |
tlb_shootdown_ipi_recv(); |
} else if (data0 == (uintptr_t) dcache_shootdown_ipi_recv) { |
dcache_shootdown_ipi_recv(); |
} |
#endif |
} else { |
/trunk/kernel/arch/sparc64/src/mm/as.c |
---|
49,10 → 49,6 |
#include <macros.h> |
#endif /* CONFIG_TSB */ |
#ifdef CONFIG_VIRT_IDX_DCACHE |
#include <arch/mm/cache.h> |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
/** Architecture dependent address space init. */ |
void as_arch_init(void) |
{ |
162,23 → 158,6 |
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH; |
dtsb_base_write(tsb_base.value); |
#endif |
#ifdef CONFIG_VIRT_IDX_DCACHE |
if (as->dcache_flush_on_install) { |
/* |
* Some mappings in this address space are illegal address |
* aliases. Upon their creation, the dcache_flush_on_install |
* flag was set. |
* |
* We are now obliged to flush the D-cache in order to guarantee |
* that there will be at most one cache line for each address |
* alias. |
* |
* This flush performs a cleanup after another address space in |
* which the alias might have existed. |
*/ |
dcache_flush(); |
} |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
} |
/** Perform sparc64-specific tasks when an address space is removed from the processor. |
213,26 → 192,6 |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, tsb); |
} |
#endif |
#ifdef CONFIG_VIRT_IDX_DCACHE |
if (as->dcache_flush_on_deinstall) { |
/* |
* Some mappings in this address space are illegal address |
* aliases. Upon their creation, the dcache_flush_on_deinstall |
* flag was set. |
* |
* We are now obliged to flush the D-cache in order to guarantee |
* that there will be at most one cache line for each address |
* alias. |
* |
* This flush performs a cleanup after this address space. It is |
* necessary because other address spaces that contain the same |
* alias are not necessarily aware of the need to carry out the |
* cache flush. The only address spaces that are aware of it are |
* those that created the illegal alias. |
*/ |
dcache_flush(); |
} |
#endif /* CONFIG_VIRT_IDX_DCACHE */ |
} |
/** @} |
/trunk/kernel/arch/sparc64/src/mm/cache.c |
---|
31,68 → 31,10 |
*/ |
/** |
* @file |
* @brief D-cache shootdown algorithm. |
*/ |
#include <arch/mm/cache.h> |
#ifdef CONFIG_SMP |
#include <smp/ipi.h> |
#include <arch/interrupt.h> |
#include <synch/spinlock.h> |
#include <arch.h> |
#include <debug.h> |
/** |
* This spinlock is used by the processors to synchronize during the D-cache |
* shootdown. |
*/ |
SPINLOCK_INITIALIZE(dcachelock); |
/** Initialize the D-cache shootdown sequence. |
* |
* Start the shootdown sequence by sending out an IPI and wait until all |
* processors spin on the dcachelock spinlock. |
*/ |
void dcache_shootdown_start(void) |
{ |
int i; |
CPU->arch.dcache_active = 0; |
spinlock_lock(&dcachelock); |
ipi_broadcast(IPI_DCACHE_SHOOTDOWN); |
busy_wait: |
for (i = 0; i < config.cpu_count; i++) |
if (cpus[i].arch.dcache_active) |
goto busy_wait; |
} |
/** Finish the D-cache shootdown sequence.
 *
 * Release dcachelock, letting the other processors (blocked in
 * dcache_shootdown_ipi_recv()) proceed, and mark this processor's
 * D-cache as active again.
 */
void dcache_shootdown_finalize(void)
{
	spinlock_unlock(&dcachelock);
	CPU->arch.dcache_active = 1;
}
/** Process the D-cache shootdown IPI.
 *
 * Mark this processor's D-cache inactive so the initiator can observe
 * the rendezvous, wait for the initiator to release dcachelock, flush
 * the local D-cache and mark it active again.
 */
void dcache_shootdown_ipi_recv(void)
{
	ASSERT(CPU);

	CPU->arch.dcache_active = 0;
	/* Lock/unlock pair: block here until the initiator finishes. */
	spinlock_lock(&dcachelock);
	spinlock_unlock(&dcachelock);
	dcache_flush();
	CPU->arch.dcache_active = 1;
}
#endif /* CONFIG_SMP */ |
/** @} |
*/ |
/trunk/kernel/arch/sparc64/src/mm/page.c |
---|
73,8 → 73,9 |
*/ |
for (i = 0; i < bsp_locked_dtlb_entries; i++) { |
dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page, |
bsp_locked_dtlb_entry[i].phys_page, bsp_locked_dtlb_entry[i].pagesize_code, |
true, false); |
bsp_locked_dtlb_entry[i].phys_page, |
bsp_locked_dtlb_entry[i].pagesize_code, true, |
false); |
} |
#endif |
151,9 → 152,12 |
/* |
* Second, save the information about the mapping for APs. |
*/ |
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page = virtaddr + i*sizemap[order].increment; |
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page = physaddr + i*sizemap[order].increment; |
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code = sizemap[order].pagesize_code; |
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page = |
virtaddr + i*sizemap[order].increment; |
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page = |
physaddr + i*sizemap[order].increment; |
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code = |
sizemap[order].pagesize_code; |
bsp_locked_dtlb_entries++; |
#endif |
} |
/trunk/kernel/arch/ia32/src/drivers/ega.c |
---|
46,6 → 46,7 |
#include <console/chardev.h> |
#include <console/console.h> |
#include <sysinfo/sysinfo.h> |
#include <ddi/ddi.h> |
/* |
* The EGA driver. |
52,6 → 53,8 |
* Simple and short. Function for displaying characters and "scrolling". |
*/ |
static parea_t ega_parea; /**< Physical memory area for EGA video RAM. */ |
SPINLOCK_INITIALIZE(egalock); |
static uint32_t ega_cursor; |
static uint8_t *videoram; |
79,11 → 82,19 |
chardev_initialize("ega_out", &ega_console, &ega_ops); |
stdout = &ega_console; |
ega_parea.pbase = VIDEORAM; |
ega_parea.vbase = (uintptr_t) videoram; |
ega_parea.frames = 1; |
ega_parea.cacheable = false; |
ddi_parea_register(&ega_parea); |
sysinfo_set_item_val("fb", NULL, true); |
sysinfo_set_item_val("fb.kind", NULL, 2); |
sysinfo_set_item_val("fb.width", NULL, ROW); |
sysinfo_set_item_val("fb.height", NULL, ROWS); |
sysinfo_set_item_val("fb.address.physical", NULL, VIDEORAM); |
sysinfo_set_item_val("fb.address.color", NULL, PAGE_COLOR((uintptr_t) |
videoram)); |
#ifndef CONFIG_FB |
putchar('\n'); |
/trunk/uspace/ns/ns.c |
---|
83,17 → 83,19 |
static void *clockaddr = NULL; |
static void *klogaddr = NULL; |
static void get_as_area(ipc_callid_t callid, ipc_call_t *call, char *name, void **addr) |
static void get_as_area(ipc_callid_t callid, ipc_call_t *call, char *name, char *colstr, void **addr) |
{ |
void *ph_addr; |
int ph_color; |
if (!*addr) { |
ph_addr = (void *)sysinfo_value(name); |
ph_addr = (void *) sysinfo_value(name); |
if (!ph_addr) { |
ipc_answer_fast(callid, ENOENT, 0, 0); |
return; |
} |
*addr = as_get_mappable_page(PAGE_SIZE); |
ph_color = (int) sysinfo_value(colstr); |
*addr = as_get_mappable_page(PAGE_SIZE, ph_color); |
physmem_map(ph_addr, *addr, 1, AS_AREA_READ | AS_AREA_CACHEABLE); |
} |
ipc_answer_fast(callid, 0, (ipcarg_t) *addr, AS_AREA_READ); |
116,10 → 118,12 |
case IPC_M_AS_AREA_RECV: |
switch (IPC_GET_ARG3(call)) { |
case SERVICE_MEM_REALTIME: |
get_as_area(callid, &call, "clock.faddr", &clockaddr); |
get_as_area(callid, &call, "clock.faddr", |
"clock.fcolor", &clockaddr); |
break; |
case SERVICE_MEM_KLOG: |
get_as_area(callid, &call, "klog.faddr", &klogaddr); |
get_as_area(callid, &call, "klog.faddr", |
"klog.fcolor", &klogaddr); |
break; |
default: |
ipc_answer_fast(callid, ENOENT, 0, 0); |
/trunk/uspace/fb/main.c |
---|
43,7 → 43,8 |
{ |
void *dest; |
dest = as_get_mappable_page(IPC_GET_ARG2(*call)); |
dest = as_get_mappable_page(IPC_GET_ARG2(*call), |
PAGE_COLOR(IPC_GET_ARG1(*call))); |
if (ipc_answer_fast(callid, 0, (sysarg_t)dest, 0) == 0) { |
if (*area) |
as_area_destroy(*area); |
/trunk/uspace/fb/fb.c |
---|
704,9 → 704,10 |
case IPC_M_AS_AREA_SEND: |
/* We accept one area for data interchange */ |
if (IPC_GET_ARG1(*call) == shm_id) { |
void *dest = as_get_mappable_page(IPC_GET_ARG2(*call)); |
void *dest = as_get_mappable_page(IPC_GET_ARG2(*call), |
PAGE_COLOR(IPC_GET_ARG1(*call))); |
shm_size = IPC_GET_ARG2(*call); |
if (!ipc_answer_fast(callid, 0, (sysarg_t)dest, 0)) |
if (!ipc_answer_fast(callid, 0, (sysarg_t) dest, 0)) |
shm = dest; |
else |
shm_id = 0; |
716,7 → 717,7 |
return 1; |
} else { |
intersize = IPC_GET_ARG2(*call); |
receive_comm_area(callid,call,(void *)&interbuffer); |
receive_comm_area(callid, call, (void *) &interbuffer); |
} |
return 1; |
case FB_PREPARE_SHM: |
1282,12 → 1283,13 |
fb_invert_colors = sysinfo_value("fb.invert-colors"); |
asz = fb_scanline * fb_height; |
fb_addr = as_get_mappable_page(asz); |
fb_addr = as_get_mappable_page(asz, (int) sysinfo_value("fb.address.color")); |
physmem_map(fb_ph_addr, fb_addr, ALIGN_UP(asz, PAGE_SIZE) >> PAGE_WIDTH, |
AS_AREA_READ | AS_AREA_WRITE); |
if (screen_init(fb_addr, fb_width, fb_height, fb_scanline, fb_visual, fb_invert_colors)) |
if (screen_init(fb_addr, fb_width, fb_height, fb_scanline, fb_visual, |
fb_invert_colors)) |
return 0; |
return -1; |
/trunk/uspace/fb/ega.c |
---|
34,7 → 34,6 |
/** @file |
*/ |
#include <stdlib.h> |
#include <unistd.h> |
#include <align.h> |
62,7 → 61,6 |
saved_screen saved_screens[MAX_SAVED_SCREENS]; |
#define EGA_IO_ADDRESS 0x3d4 |
#define EGA_IO_SIZE 2 |
126,12 → 124,14 |
{ |
int i; |
if (rows > 0) { |
memcpy (scr_addr,((char *)scr_addr) + rows * scr_width * 2, scr_width * scr_height * 2 - rows * scr_width * 2); |
memcpy (scr_addr,((char *)scr_addr) + rows * scr_width * 2, |
scr_width * scr_height * 2 - rows * scr_width * 2); |
for (i = 0; i < rows * scr_width ; i ++) |
(((short *)scr_addr) + scr_width * scr_height - rows * scr_width) [i] = ((style << 8) + ' '); |
(((short *)scr_addr) + scr_width * scr_height - rows * |
scr_width) [i] = ((style << 8) + ' '); |
} else if (rows < 0) { |
memcpy (((char *)scr_addr) - rows * scr_width * 2 ,scr_addr ,scr_width * scr_height * 2 + rows * scr_width * 2); |
memcpy (((char *)scr_addr) - rows * scr_width * 2, scr_addr, |
scr_width * scr_height * 2 + rows * scr_width * 2); |
for (i = 0; i < - rows * scr_width ; i++) |
((short *)scr_addr) [i] = ((style << 8 ) + ' '); |
} |
308,13 → 308,14 |
ega_ph_addr=(void *)sysinfo_value("fb.address.physical"); |
scr_width=sysinfo_value("fb.width"); |
scr_height=sysinfo_value("fb.height"); |
iospace_enable(task_get_id(),(void *)EGA_IO_ADDRESS,2); |
iospace_enable(task_get_id(), (void *) EGA_IO_ADDRESS, 2); |
sz = scr_width*scr_height*2; |
scr_addr = as_get_mappable_page(sz); |
sz = scr_width * scr_height * 2; |
scr_addr = as_get_mappable_page(sz, (int) |
sysinfo_value("fb.address.color")); |
physmem_map(ega_ph_addr, scr_addr, ALIGN_UP(sz, PAGE_SIZE) >> PAGE_WIDTH, |
AS_AREA_READ | AS_AREA_WRITE); |
physmem_map(ega_ph_addr, scr_addr, ALIGN_UP(sz, PAGE_SIZE) >> |
PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE); |
async_set_client_connection(ega_client_connection); |
/trunk/uspace/klog/klog.c |
---|
63,10 → 63,10 |
printf("Kernel console output.\n"); |
mapping = as_get_mappable_page(PAGE_SIZE); |
mapping = as_get_mappable_page(PAGE_SIZE, sysinfo_value("klog.fcolor")); |
res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV, |
(sysarg_t)mapping, PAGE_SIZE, SERVICE_MEM_KLOG, |
NULL,NULL,NULL); |
(sysarg_t) mapping, PAGE_SIZE, SERVICE_MEM_KLOG, |
NULL, NULL, NULL); |
if (res) { |
printf("Failed to initialize klog memarea\n"); |
_exit(1); |
/trunk/uspace/rd/rd.c |
---|
73,11 → 73,12 |
{ |
size_t rd_size = sysinfo_value("rd.size"); |
void * rd_ph_addr = (void *) sysinfo_value("rd.address.physical"); |
int rd_color = (int) sysinfo_value("rd.address.color"); |
if (rd_size == 0) |
return false; |
void * rd_addr = as_get_mappable_page(rd_size); |
void * rd_addr = as_get_mappable_page(rd_size, rd_color); |
physmem_map(rd_ph_addr, rd_addr, ALIGN_UP(rd_size, PAGE_SIZE) >> PAGE_WIDTH, AS_AREA_READ | AS_AREA_WRITE); |
/trunk/uspace/libc/include/as.h |
---|
39,12 → 39,15 |
#include <task.h> |
#include <kernel/arch/mm/as.h> |
#include <kernel/mm/as.h> |
#include <libarch/config.h> |
#define PAGE_COLOR(va) (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1)) |
extern void *as_area_create(void *address, size_t size, int flags); |
extern int as_area_resize(void *address, size_t size, int flags); |
extern int as_area_destroy(void *address); |
extern void *set_maxheapsize(size_t mhs); |
extern void * as_get_mappable_page(size_t sz); |
extern void * as_get_mappable_page(size_t sz, int color); |
#endif |
/trunk/uspace/libc/include/bitops.h |
---|
0,0 → 1,94 |
/* |
* Copyright (C) 2006 Ondrej Palkovsky |
* All rights reserved. |
* |
* Redistribution and use in source and binary forms, with or without |
* modification, are permitted provided that the following conditions |
* are met: |
* |
* - Redistributions of source code must retain the above copyright |
* notice, this list of conditions and the following disclaimer. |
* - Redistributions in binary form must reproduce the above copyright |
* notice, this list of conditions and the following disclaimer in the |
* documentation and/or other materials provided with the distribution. |
* - The name of the author may not be used to endorse or promote products |
* derived from this software without specific prior written permission. |
* |
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup generic |
* @{ |
*/ |
/** @file |
*/ |
#ifndef LIBC_BITOPS_H_ |
#define LIBC_BITOPS_H_ |
#include <types.h> |
/** Find the index of the most significant set bit (i.e. floor(log_2(arg))).
 *
 * @param arg 32-bit value to scan.
 *
 * @return Bit position of the leftmost non-zero bit; 0 when arg is zero
 *         (note: indistinguishable from fnzb32(1)).
 */
static inline int fnzb32(uint32_t arg)
{
	int pos = 0;
	int shift;

	/* Binary search over the bit width: test halves of 16, 8, 4, 2, 1. */
	for (shift = 16; shift >= 1; shift >>= 1) {
		if (arg >> shift) {
			arg >>= shift;
			pos += shift;
		}
	}

	return pos;
}
/** 64-bit variant of fnzb32(): floor(log_2(arg)); 0 when arg is zero. */
static inline int fnzb64(uint64_t arg)
{
	uint32_t high = (uint32_t) (arg >> 32);

	/* Delegate to the 32-bit scan on whichever word holds the top bit. */
	if (high != 0)
		return 32 + fnzb32(high);

	return fnzb32((uint32_t) arg);
}
#define fnzb(x) fnzb32(x) |
#endif |
/** @} |
*/ |
/trunk/uspace/libc/generic/time.c |
---|
40,6 → 40,7 |
#include <unistd.h> |
#include <atomic.h> |
#include <futex.h> |
#include <sysinfo.h> |
#include <ipc/services.h> |
#include <sysinfo.h> |
71,9 → 72,12 |
int res; |
if (!ktime) { |
mapping = as_get_mappable_page(PAGE_SIZE); |
mapping = as_get_mappable_page(PAGE_SIZE, (int) |
sysinfo_value("clock.fcolor")); |
/* Get the mapping of kernel clock */ |
res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV, (sysarg_t) mapping, PAGE_SIZE, SERVICE_MEM_REALTIME, NULL, &rights, NULL); |
res = ipc_call_sync_3(PHONE_NS, IPC_M_AS_AREA_RECV, (sysarg_t) |
mapping, PAGE_SIZE, SERVICE_MEM_REALTIME, NULL, &rights, |
NULL); |
if (res) { |
printf("Failed to initialize timeofday memarea\n"); |
_exit(1); |
/trunk/uspace/libc/generic/as.c |
---|
37,6 → 37,7 |
#include <unistd.h> |
#include <align.h> |
#include <types.h> |
#include <bitops.h> |
/** |
* Either 4*256M on 32-bit architectures or 16*256M on 64-bit architectures. |
53,12 → 54,14 |
*/ |
void *as_area_create(void *address, size_t size, int flags) |
{ |
return (void *) __SYSCALL3(SYS_AS_AREA_CREATE, (sysarg_t ) address, (sysarg_t) size, (sysarg_t) flags); |
return (void *) __SYSCALL3(SYS_AS_AREA_CREATE, (sysarg_t ) address, |
(sysarg_t) size, (sysarg_t) flags); |
} |
/** Resize address space area. |
* |
* @param address Virtual address pointing into already existing address space area. |
* @param address Virtual address pointing into already existing address space |
* area. |
* @param size New requested size of the area. |
* @param flags Currently unused. |
* |
66,12 → 69,14 |
*/ |
int as_area_resize(void *address, size_t size, int flags) |
{ |
return __SYSCALL3(SYS_AS_AREA_RESIZE, (sysarg_t ) address, (sysarg_t) size, (sysarg_t) flags); |
return __SYSCALL3(SYS_AS_AREA_RESIZE, (sysarg_t ) address, (sysarg_t) |
size, (sysarg_t) flags); |
} |
/** Destroy address space area. |
* |
* @param address Virtual address pointing into the address space area being destroyed. |
* @param address Virtual address pointing into the address space area being |
* destroyed. |
* |
* @return Zero on success or a code from @ref errno.h on failure. |
*/ |
133,28 → 138,53 |
maxheapsize = mhs; |
/* Return pointer to area not managed by sbrk */ |
return ((void *) &_heap + maxheapsize); |
} |
/** Return pointer to some unmapped area, where fits new as_area |
* |
* @param sz Requested size of the allocation. |
* @param color Requested virtual color of the allocation. |
* |
* @return Pointer to the beginning |
* |
* TODO: make some first_fit/... algorithm, we are now just incrementing |
* the pointer to last area |
*/ |
void * as_get_mappable_page(size_t sz) |
#include <stdio.h> |
void *as_get_mappable_page(size_t sz, int color) |
{ |
void *res; |
uint64_t asz; |
int i; |
if (!sz) |
return NULL; |
asz = 1 << (fnzb64(sz - 1) + 1); |
/* Set heapsize to some meaningful value */ |
if (maxheapsize == -1) |
set_maxheapsize(MAX_HEAP_SIZE); |
if (!last_allocated) |
last_allocated = (void *) ALIGN_UP((void *) &_heap + maxheapsize, PAGE_SIZE); |
sz = ALIGN_UP(sz, PAGE_SIZE); |
/* |
* Make sure we allocate from naturally aligned address and a page of |
* appropriate color. |
*/ |
i = 0; |
do { |
if (!last_allocated) { |
last_allocated = (void *) ALIGN_UP((void *) &_heap + |
maxheapsize, asz); |
} else { |
last_allocated = (void *) ALIGN_UP(((uintptr_t) |
last_allocated) + (int) (i > 0), asz); |
} |
} while ((asz < (1 << (PAGE_COLOR_BITS + PAGE_WIDTH))) && |
(PAGE_COLOR((uintptr_t) last_allocated) != color) && |
(++i < (1 << PAGE_COLOR_BITS))); |
res = last_allocated; |
last_allocated += sz; |
last_allocated += ALIGN_UP(sz, PAGE_SIZE); |
return res; |
} |
/trunk/uspace/libc/generic/mman.c |
---|
39,7 → 39,7 |
void *mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset) |
{ |
if (!start) |
start = as_get_mappable_page(length); |
start = as_get_mappable_page(length, 0); |
// if (! ((flags & MAP_SHARED) ^ (flags & MAP_PRIVATE))) |
// return MAP_FAILED; |
/trunk/uspace/libc/arch/sparc64/include/config.h |
---|
37,6 → 37,7 |
#define PAGE_WIDTH 13 |
#define PAGE_SIZE (1<<PAGE_WIDTH) |
#define PAGE_COLOR_BITS 1 /**< Bit 13 is the page color. */ |
#endif |
/trunk/uspace/libc/arch/ia64/include/config.h |
---|
37,6 → 37,7 |
#define PAGE_WIDTH 14 |
#define PAGE_SIZE (1<<PAGE_WIDTH) |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#endif |
/trunk/uspace/libc/arch/ppc32/include/config.h |
---|
37,6 → 37,7 |
#define PAGE_WIDTH 12 |
#define PAGE_SIZE (1<<PAGE_WIDTH) |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#endif |
/trunk/uspace/libc/arch/amd64/include/config.h |
---|
37,6 → 37,7 |
#define PAGE_WIDTH 12 |
#define PAGE_SIZE (1<<PAGE_WIDTH) |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#endif |
/trunk/uspace/libc/arch/ppc64/include/config.h |
---|
37,6 → 37,7 |
#define PAGE_WIDTH 12 |
#define PAGE_SIZE (1<<PAGE_WIDTH) |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#endif |
/trunk/uspace/libc/arch/mips32/include/config.h |
---|
37,6 → 37,7 |
#define PAGE_WIDTH 14 |
#define PAGE_SIZE (1<<PAGE_WIDTH) |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#endif |
/trunk/uspace/libc/arch/ia32/include/config.h |
---|
37,6 → 37,7 |
#define PAGE_WIDTH 12 |
#define PAGE_SIZE (1<<PAGE_WIDTH) |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#endif |