Subversion Repositories HelenOS

Compare Revisions

Rev 3152 → Rev 3153

/branches/dynload/kernel/generic/src/main/main.c
80,6 → 80,7
#include <adt/btree.h>
#include <smp/smp.h>
#include <ddi/ddi.h>
#include <console/console.h>
 
/** Global configuration structure. */
config_t config;
256,6 → 257,7
printf("No init binaries found\n");
LOG_EXEC(ipc_init());
LOG_EXEC(klog_init());
 
/*
* Create kernel task.
/branches/dynload/kernel/generic/src/cpu/cpu.c
67,7 → 67,7
panic("malloc/cpus");
 
/* initialize everything */
memsetb((uintptr_t) cpus, sizeof(cpu_t) * config.cpu_count, 0);
memsetb(cpus, sizeof(cpu_t) * config.cpu_count, 0);
 
for (i = 0; i < config.cpu_count; i++) {
cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | FRAME_ATOMIC);
/branches/dynload/kernel/generic/src/console/console.c
35,19 → 35,27
 
#include <console/console.h>
#include <console/chardev.h>
#include <sysinfo/sysinfo.h>
#include <synch/waitq.h>
#include <synch/spinlock.h>
#include <arch/types.h>
#include <ddi/device.h>
#include <ddi/irq.h>
#include <ddi/ddi.h>
#include <ipc/irq.h>
#include <arch.h>
#include <func.h>
#include <print.h>
#include <atomic.h>
 
#define KLOG_SIZE 4096
#define KLOG_SIZE PAGE_SIZE
#define KLOG_LATENCY 8
 
/**< Kernel log cyclic buffer */
static char klog[KLOG_SIZE];
static char klog[KLOG_SIZE] __attribute__ ((aligned (PAGE_SIZE)));
 
/**< Kernel log initialized */
static bool klog_inited = false;
/**< Index of the first kernel log character */
static index_t klog_start = 0;
/**< Number of valid kernel log characters */
54,7 → 62,24
static size_t klog_len = 0;
/**< Number of stored (not printed) kernel log characters */
static size_t klog_stored = 0;
/**< Number of kernel log characters not yet announced to uspace */
static size_t klog_uspace = 0;
 
/**< Kernel log spinlock */
SPINLOCK_INITIALIZE(klog_lock);
 
/** Physical memory area used for klog buffer */
static parea_t klog_parea;
/*
* For now, we use 0 as the INR.
* In the future, it would be desirable to have an architecture-specific
* definition of KLOG_VIRT_INR instead.
*/
#define KLOG_VIRT_INR 0
 
static irq_t klog_irq;
 
static chardev_operations_t null_stdout_ops = {
.suspend = NULL,
.resume = NULL,
67,10 → 92,58
.op = &null_stdout_ops
};
 
/** Standard input character device. */
/** Always refuse IRQ ownership.
*
* This is not a real IRQ, so we always decline.
*
* @return Always returns IRQ_DECLINE.
*/
static irq_ownership_t klog_claim(void)
{
return IRQ_DECLINE;
}
 
/** Standard input character device */
chardev_t *stdin = NULL;
chardev_t *stdout = &null_stdout;
 
/** Initialize kernel logging facility
*
* The shared area contains the kernel cyclic buffer. A userspace application
* may be notified of new data, with an indication of the position and size
* of the data within the circular buffer.
*/
void klog_init(void)
{
void *faddr = (void *) KA2PA(klog);
ASSERT((uintptr_t) faddr % FRAME_SIZE == 0);
ASSERT(KLOG_SIZE % FRAME_SIZE == 0);
 
devno_t devno = device_assign_devno();
klog_parea.pbase = (uintptr_t) faddr;
klog_parea.vbase = (uintptr_t) klog;
klog_parea.frames = SIZE2FRAMES(KLOG_SIZE);
klog_parea.cacheable = true;
ddi_parea_register(&klog_parea);
 
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
sysinfo_set_item_val("klog.pages", NULL, SIZE2FRAMES(KLOG_SIZE));
sysinfo_set_item_val("klog.devno", NULL, devno);
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR);
 
irq_initialize(&klog_irq);
klog_irq.devno = devno;
klog_irq.inr = KLOG_VIRT_INR;
klog_irq.claim = klog_claim;
irq_register(&klog_irq);
spinlock_lock(&klog_lock);
klog_inited = true;
spinlock_unlock(&klog_lock);
}
 
/** Get character from character device. Do not echo character.
*
* @param chardev Character device.
160,8 → 233,22
return ch;
}
 
/** Notify uspace of newly stored kernel log characters */
void klog_update(void)
{
spinlock_lock(&klog_lock);
if ((klog_inited) && (klog_irq.notif_cfg.notify) && (klog_uspace > 0)) {
ipc_irq_send_msg_3(&klog_irq, klog_start, klog_len, klog_uspace);
klog_uspace = 0;
}
spinlock_unlock(&klog_lock);
}
 
/** Output a character to stdout and record it in the kernel log */
void putchar(char c)
{
spinlock_lock(&klog_lock);
if ((klog_stored > 0) && (stdout->op->write)) {
/* Print characters stored in the kernel log */
index_t i;
184,6 → 271,22
if (klog_stored < klog_len)
klog_stored++;
}
/* The character is stored for uspace */
if (klog_uspace < klog_len)
klog_uspace++;
/* Check whether uspace should be notified to update */
bool update;
if ((klog_uspace > KLOG_LATENCY) || (c == '\n'))
update = true;
else
update = false;
spinlock_unlock(&klog_lock);
if (update)
klog_update();
}
 
/** @}
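
The counters above implement a single cyclic buffer with independent read positions for stdout and uspace. A minimal self-contained sketch of the accounting, assuming the same wrap-around semantics (klog_store_char is an illustrative helper, not a HelenOS function):

#include <stddef.h>

#define KLOG_SIZE 4096

static char klog[KLOG_SIZE];
static size_t klog_start = 0;    /* index of the oldest valid character */
static size_t klog_len = 0;      /* number of valid characters */
static size_t klog_stored = 0;   /* characters not yet printed to stdout */
static size_t klog_uspace = 0;   /* characters not yet announced to uspace */

/* Store one character, overwriting the oldest one when the buffer is full. */
static void klog_store_char(char c)
{
	klog[(klog_start + klog_len) % KLOG_SIZE] = c;
	if (klog_len < KLOG_SIZE)
		klog_len++;
	else
		klog_start = (klog_start + 1) % KLOG_SIZE;

	/* Both backlogs are capped at the number of valid characters. */
	if (klog_stored < klog_len)
		klog_stored++;
	if (klog_uspace < klog_len)
		klog_uspace++;
}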
/branches/dynload/kernel/generic/src/proc/task.c
45,6 → 45,7
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <arch/barrier.h>
#include <panic.h>
#include <adt/avl.h>
#include <adt/btree.h>
447,8 → 448,16
int rc = copy_from_uspace(kimage, image, size);
if (rc != EOK)
return rc;
 
/*
* Not very efficient, and it would be better to call it on the code only,
* but this whole function is a temporary hack anyway and one day it
* will go away in favor of the userspace dynamic loader.
*/
smc_coherence_block(kimage, size);
uspace_arg_t *kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
uspace_arg_t *kernel_uarg;
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
if (kernel_uarg == NULL) {
free(kimage);
return ENOMEM;
477,9 → 486,9
}
as_area_t *area = as_area_create(as,
AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
AS_AREA_ATTR_NONE, &anon_backend, NULL);
AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
AS_AREA_ATTR_NONE, &anon_backend, NULL);
if (area == NULL) {
as_destroy(as);
free(kernel_uarg);
499,7 → 508,7
cap_set(task, cap_get(TASK));
thread_t *thread = thread_create(uinit, kernel_uarg, task,
THREAD_FLAG_USPACE, "user", false);
THREAD_FLAG_USPACE, "user", false);
if (thread == NULL) {
task_destroy(task);
as_destroy(as);
630,15 → 639,15
order(task_get_accounting(t), &cycles, &suffix);
 
#ifdef __32_BITS__
printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %10p %10p %9" PRIu64 "%c %7ld %6ld",
t->taskid, t->name, t->context, t, t->as, cycles, suffix,
atomic_get(&t->refcount), atomic_get(&t->active_calls));
printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %10p %10p %9" PRIu64
"%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif
 
#ifdef __64_BITS__
printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %18p %18p %9" PRIu64 "%c %7ld %6ld",
t->taskid, t->name, t->context, t, t->as, cycles, suffix,
atomic_get(&t->refcount), atomic_get(&t->active_calls));
printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %18p %18p %9" PRIu64
"%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif
 
for (j = 0; j < IPC_MAX_PHONES; j++) {
662,16 → 671,16
 
#ifdef __32_BITS__
printf("taskid name ctx address as "
"cycles threads calls callee\n");
"cycles threads calls callee\n");
printf("------ ---------- --- ---------- ---------- "
"---------- ------- ------ ------>\n");
"---------- ------- ------ ------>\n");
#endif
 
#ifdef __64_BITS__
printf("taskid name ctx address as "
"cycles threads calls callee\n");
"cycles threads calls callee\n");
printf("------ ---------- --- ------------------ ------------------ "
"---------- ------- ------ ------>\n");
"---------- ------- ------ ------>\n");
#endif
 
avltree_walk(&tasks_tree, task_print_walker, NULL);
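
smc_coherence_block() comes from <arch/barrier.h> and is necessarily architecture-specific: after instructions are written into memory, the data cache must be made coherent with the instruction cache before that code runs. A sketch of the idea using a GCC builtin (an assumption for illustration, not the HelenOS implementation):

#include <stddef.h>

/* Make freshly written code visible to instruction fetch.
 * Sketch only: kernels typically use dedicated cache-maintenance
 * instructions here (e.g. flushing dcache lines and invalidating
 * the corresponding icache lines). */
static inline void smc_coherence_sketch(void *addr, size_t size)
{
	__builtin___clear_cache((char *) addr, (char *) addr + size);
}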
/branches/dynload/kernel/generic/src/proc/thread.c
293,8 → 293,7
return NULL;
/* Not needed, but good for debugging */
memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
0);
memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
ipl = interrupts_disable();
spinlock_lock(&tidlock);
/branches/dynload/kernel/generic/src/lib/memstr.c
87,7 → 87,7
* @param x Value to fill.
*
*/
void _memsetb(uintptr_t dst, size_t cnt, uint8_t x)
void _memsetb(void *dst, size_t cnt, uint8_t x)
{
unsigned int i;
uint8_t *p = (uint8_t *) dst;
106,7 → 106,7
* @param x Value to fill.
*
*/
void _memsetw(uintptr_t dst, size_t cnt, uint16_t x)
void _memsetw(void *dst, size_t cnt, uint16_t x)
{
unsigned int i;
uint16_t *p = (uint16_t *) dst;
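
This signature change from uintptr_t to void * is what removes the casts at the memsetb() call sites throughout this revision. Assuming memsetb() is a plain alias for the generic _memsetb() fallback (a sketch; an architecture may supply an optimized routine instead), the matching header declaration would look like:

#include <stdint.h>
#include <stddef.h>

extern void _memsetb(void *dst, size_t cnt, uint8_t x);
#define memsetb(dst, cnt, x)  _memsetb((dst), (cnt), (x))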
/branches/dynload/kernel/generic/src/lib/objc_ext.c
161,7 → 161,7
 
void *memset(void *s, int c, size_t n)
{
memsetb((uintptr_t) s, n, c);
memsetb(s, n, c);
return s;
}
 
/branches/dynload/kernel/generic/src/adt/hash_table.c
63,7 → 63,7
if (!h->entry) {
panic("cannot allocate memory for hash table\n");
}
memsetb((uintptr_t) h->entry, m * sizeof(link_t), 0);
memsetb(h->entry, m * sizeof(link_t), 0);
for (i = 0; i < m; i++)
list_initialize(&h->entry[i]);
/branches/dynload/kernel/generic/src/mm/slab.c
559,8 → 559,7
 
cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,0);
for (i = 0; i < config.cpu_count; i++) {
memsetb((uintptr_t)&cache->mag_cache[i],
sizeof(cache->mag_cache[i]), 0);
memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
}
}
578,7 → 577,7
int pages;
ipl_t ipl;
 
memsetb((uintptr_t)cache, sizeof(*cache), 0);
memsetb(cache, sizeof(*cache), 0);
cache->name = name;
 
if (align < sizeof(unative_t))
/branches/dynload/kernel/generic/src/mm/backend_anon.c
113,7 → 113,7
}
if (allocate) {
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
/*
144,7 → 144,7
* the different causes
*/
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
}
/branches/dynload/kernel/generic/src/mm/as.c
324,8 → 324,7
if (backend_data)
a->backend_data = *backend_data;
else
memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
0);
memsetb(&a->backend_data, sizeof(a->backend_data), 0);
 
btree_create(&a->used_space);
/branches/dynload/kernel/generic/src/mm/page.c
40,11 → 40,28
* They however, define the single interface.
*/
 
/*
* Note on memory prefetching and updating memory mappings, also described in:
* AMD x86-64 Architecture Programmer's Manual, Volume 2, System Programming,
* 7.2.1 Special Coherency Considerations.
*
* The processor which modifies a page table mapping can access prefetched data
* from the old mapping. In order to prevent this, we place a memory barrier
* after a mapping is updated.
*
* We assume that the other processors are either not using the mapping yet
* (i.e. during the bootstrap) or are executing the TLB shootdown code. While
* we don't care much about the former case, the processors in the latter case
* will do an implicit serialization by virtue of running the TLB shootdown
* interrupt handler.
*/
 
#include <mm/page.h>
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <arch/barrier.h>
#include <arch/types.h>
#include <arch/asm.h>
#include <memstr.h>
65,8 → 82,8
* considering possible crossings
* of page boundaries.
*
* @param s Address of the structure.
* @param size Size of the structure.
* @param s Address of the structure.
* @param size Size of the structure.
*/
void map_structure(uintptr_t s, size_t size)
{
76,8 → 93,11
cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);
 
for (i = 0; i < cnt; i++)
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE,
s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);
 
/* Repel prefetched accesses to the old mapping. */
memory_barrier();
}
 
/** Insert mapping of page to frame.
87,10 → 107,11
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which the page belongs.
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is done.
* @param flags Flags to be used for mapping.
* @param as Address space to which the page belongs.
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is
* done.
* @param flags Flags to be used for mapping.
*/
void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
98,6 → 119,9
ASSERT(page_mapping_operations->mapping_insert);
page_mapping_operations->mapping_insert(as, page, frame, flags);
/* Repel prefetched accesses to the old mapping. */
memory_barrier();
}
 
/** Remove mapping of page.
108,8 → 132,8
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which the page belongs.
* @param page Virtual address of the page to be demapped.
* @param as Address space to which the page belongs.
* @param page Virtual address of the page to be demapped.
*/
void page_mapping_remove(as_t *as, uintptr_t page)
{
117,6 → 141,9
ASSERT(page_mapping_operations->mapping_remove);
page_mapping_operations->mapping_remove(as, page);
 
/* Repel prefetched accesses to the old mapping. */
memory_barrier();
}
 
/** Find mapping for virtual page
125,10 → 152,11
*
* The page table must be locked and interrupts must be disabled.
*
* @param as Address space to which the page belongs.
* @param page Virtual page.
* @param as Address space to which the page belongs.
* @param page Virtual page.
*
* @return NULL if there is no such mapping; requested mapping otherwise.
* @return NULL if there is no such mapping; requested mapping
* otherwise.
*/
pte_t *page_mapping_find(as_t *as, uintptr_t page)
{
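
memory_barrier() is declared in <arch/barrier.h> and differs per architecture. On amd64, for example, a full fence suffices; a minimal sketch assuming GCC inline assembly:

/* Full memory fence (amd64 sketch; other architectures use
 * different instructions or compiler intrinsics). */
static inline void memory_barrier(void)
{
	asm volatile ("mfence" ::: "memory");
}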
/branches/dynload/kernel/generic/src/mm/backend_elf.c
48,6 → 48,7
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <arch/barrier.h>
 
#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
67,12 → 68,13
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e.
* read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
* on success (i.e. serviced).
*/
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
150,6 → 152,10
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memcpy((void *) PA2KA(frame),
(void *) (base + i * FRAME_SIZE), FRAME_SIZE);
if (entry->p_flags & PF_X) {
smc_coherence_block((void *) PA2KA(frame),
FRAME_SIZE);
}
dirty = true;
} else {
frame = KA2PA(base + i * FRAME_SIZE);
162,7 → 168,7
* and cleared.
*/
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
} else {
size_t pad_lo, pad_hi;
187,8 → 193,13
memcpy((void *) (PA2KA(frame) + pad_lo),
(void *) (base + i * FRAME_SIZE + pad_lo),
FRAME_SIZE - pad_lo - pad_hi);
memsetb(PA2KA(frame), pad_lo, 0);
memsetb(PA2KA(frame) + FRAME_SIZE - pad_hi, pad_hi, 0);
if (entry->p_flags & PF_X) {
smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
FRAME_SIZE - pad_lo - pad_hi);
}
memsetb((void *) PA2KA(frame), pad_lo, 0);
memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
0);
dirty = true;
}
 
212,9 → 223,10
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
* @param frame Frame to be released.
* @param area Pointer to the address space area.
* @param page Page that is mapped to frame. Must be aligned to
* PAGE_SIZE.
* @param frame Frame to be released.
*
*/
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
257,7 → 269,7
*
* The address space and address space area must be locked prior to the call.
*
* @param area Address space area.
* @param area Address space area.
*/
void elf_share(as_area_t *area)
{
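
In the partially backed case above, memcpy() fills only the bytes that the ELF segment actually covers, and the two memsetb() calls zero the leading pad_lo and trailing pad_hi bytes of the frame. A self-contained sketch of that layout (fill_partial_frame is an illustrative helper, not HelenOS code):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define FRAME_SIZE 4096

/* Bytes [pad_lo, FRAME_SIZE - pad_hi) come from the segment image;
 * the pads on both sides are cleared. */
static void fill_partial_frame(uint8_t *frame, const uint8_t *image,
    size_t pad_lo, size_t pad_hi)
{
	memcpy(frame + pad_lo, image + pad_lo,
	    FRAME_SIZE - pad_lo - pad_hi);
	memset(frame, 0, pad_lo);
	memset(frame + FRAME_SIZE - pad_hi, 0, pad_hi);
}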
/branches/dynload/kernel/generic/src/syscall/syscall.c
57,7 → 57,7
* Print to kernel log.
*
*/
static unative_t sys_io(int fd, const void * buf, size_t count)
static unative_t sys_klog(int fd, const void * buf, size_t count)
{
size_t i;
char *data;
65,20 → 65,23
 
if (count > PAGE_SIZE)
return ELIMIT;
 
data = (char *) malloc(count, 0);
if (!data)
return ENOMEM;
rc = copy_from_uspace(data, buf, count);
if (rc) {
if (count > 0) {
data = (char *) malloc(count, 0);
if (!data)
return ENOMEM;
rc = copy_from_uspace(data, buf, count);
if (rc) {
free(data);
return rc;
}
for (i = 0; i < count; i++)
putchar(data[i]);
free(data);
return rc;
}
 
for (i = 0; i < count; i++)
putchar(data[i]);
free(data);
} else
klog_update();
return count;
}
118,7 → 121,7
}
 
syshandler_t syscall_table[SYSCALL_END] = {
(syshandler_t) sys_io,
(syshandler_t) sys_klog,
(syshandler_t) sys_tls_set,
/* Thread and task related syscalls. */
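
With the count > 0 guard, a zero-length write degenerates into a pure flush: no buffer is allocated or copied, and only klog_update() runs to push pending characters to uspace. A hypothetical userspace wrapper (the names klog_flush, __SYSCALL3 and SYS_KLOG are illustrative and not verified against the HelenOS libc of this revision):

/* Trigger a klog notification without writing anything. */
static inline void klog_flush(void)
{
	__SYSCALL3(SYS_KLOG, 1, 0, 0);
}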
/branches/dynload/kernel/generic/src/ipc/ipc.c
66,7 → 66,7
*/
static void _ipc_call_init(call_t *call)
{
memsetb((uintptr_t) call, sizeof(*call), 0);
memsetb(call, sizeof(*call), 0);
call->callerbox = &TASK->answerbox;
call->sender = TASK;
call->buffer = NULL;