Subversion Repositories HelenOS

Compare Revisions

Rev 3423 → Rev 3424

/branches/tracing/kernel/test/avltree/avltree1.c
48,7 → 48,8
 
static int test_tree_balance(avltree_node_t *node);
static avltree_node_t *test_tree_parents(avltree_node_t *node);
static void print_tree_structure_flat (avltree_node_t *node, int level);
static void print_tree_structure_flat (avltree_node_t *node, int level)
__attribute__ ((used));
static avltree_node_t *alloc_avltree_node(void);
 
static avltree_node_t *test_tree_parents(avltree_node_t *node)
61,14 → 62,15
if (node->lft) {
tmp = test_tree_parents(node->lft);
if (tmp != node) {
printf("Bad parent pointer key: %d, address: %p\n",
tmp->key, node->lft);
printf("Bad parent pointer key: %" PRIu64
", address: %p\n", tmp->key, node->lft);
}
}
if (node->rgt) {
tmp = test_tree_parents(node->rgt);
if (tmp != node) {
printf("Bad parent pointer key: %d, address: %p\n",
printf("Bad parent pointer key: %" PRIu64
", address: %p\n",
tmp->key,node->rgt);
}
}
94,7 → 96,8
* Prints the structure of the node, which is 'level' levels from the top of
* the tree.
*/
static void print_tree_structure_flat(avltree_node_t *node, int level)
static void
print_tree_structure_flat(avltree_node_t *node, int level)
{
/*
* You can set the maximum level as high as you like.
109,7 → 112,7
if (node == NULL)
return;
 
printf("%d[%d]", node->key, node->balance);
printf("%" PRIu64 "[%" PRIu8 "]", node->key, node->balance);
if (node->lft != NULL || node->rgt != NULL) {
printf("(");
 
130,6 → 133,7
for (i = 0; i < NODE_COUNT - 1; i++) {
avltree_nodes[i].par = &avltree_nodes[i + 1];
}
avltree_nodes[i].par = NULL;
/*
* Node keys which will be used for insertion. Up to NODE_COUNT size of
169,7 → 173,6
for (i = 21; i < NODE_COUNT; i++)
avltree_nodes[i].key = i * 3;
avltree_nodes[i].par = NULL;
first_free_node = &avltree_nodes[0];
}
 
191,7 → 194,7
avltree_create(tree);
if (!quiet)
printf("Inserting %d nodes...", node_count);
printf("Inserting %" PRIc " nodes...", node_count);
 
for (i = 0; i < node_count; i++) {
newnode = alloc_avltree_node();
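Most hunks in this revision follow the same pattern: hard-coded length modifiers such as %d, %llu and %zd are replaced by fixed-width format macros so that one format string stays correct on both 32-bit and 64-bit builds. A minimal standalone sketch of the idea, using the standard <inttypes.h> macros rather than the kernel's own PRI definitions, and mirroring the node->key and node->balance fields printed above:

/* Sketch of the format-macro pattern; compiles as ordinary hosted C. */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t key = 42;	/* e.g. an AVL node key or a thread ID */
	uint8_t balance = 1;

	/* Old style: printf("%d[%d]", ...) is wrong for 64-bit values on 32-bit targets. */
	/* New style: the macro expands to the right length modifier on each platform. */
	printf("%" PRIu64 "[%" PRIu8 "]\n", key, balance);
	return 0;
}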
/branches/tracing/kernel/test/synch/rwlock3.c
45,14 → 45,14
thread_detach(THREAD);
if (!sh_quiet)
printf("cpu%d, tid %llu: trying to lock rwlock for reading....\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 ": trying to lock rwlock for reading....\n", CPU->id, THREAD->tid);
rwlock_read_lock(&rwlock);
rwlock_read_unlock(&rwlock);
if (!sh_quiet) {
printf("cpu%d, tid %llu: success\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu: trying to lock rwlock for writing....\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 ": success\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 ": trying to lock rwlock for writing....\n", CPU->id, THREAD->tid);
}
 
rwlock_write_lock(&rwlock);
59,7 → 59,7
rwlock_write_unlock(&rwlock);
if (!sh_quiet)
printf("cpu%d, tid %llu: success\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 ": success\n", CPU->id, THREAD->tid);
atomic_dec(&thread_count);
}
88,7 → 88,7
while (atomic_get(&thread_count) > 0) {
if (!quiet)
printf("Threads left: %d\n", atomic_get(&thread_count));
printf("Threads left: %ld\n", atomic_get(&thread_count));
thread_sleep(1);
}
/branches/tracing/kernel/test/synch/rwlock4.c
74,18 → 74,18
to = random(40000);
if (!sh_quiet)
printf("cpu%d, tid %llu w+ (%d)\n", CPU->id, THREAD->tid, to);
printf("cpu%u, tid %" PRIu64 " w+ (%d)\n", CPU->id, THREAD->tid, to);
rc = rwlock_write_lock_timeout(&rwlock, to);
if (SYNCH_FAILED(rc)) {
if (!sh_quiet)
printf("cpu%d, tid %llu w!\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 " w!\n", CPU->id, THREAD->tid);
atomic_dec(&thread_count);
return;
}
if (!sh_quiet)
printf("cpu%d, tid %llu w=\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 " w=\n", CPU->id, THREAD->tid);
 
if (rwlock.readers_in) {
if (!sh_quiet)
106,7 → 106,7
rwlock_write_unlock(&rwlock);
if (!sh_quiet)
printf("cpu%d, tid %llu w-\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 " w-\n", CPU->id, THREAD->tid);
atomic_dec(&thread_count);
}
 
119,24 → 119,24
to = random(2000);
if (!sh_quiet)
printf("cpu%d, tid %llu r+ (%d)\n", CPU->id, THREAD->tid, to);
printf("cpu%u, tid %" PRIu64 " r+ (%d)\n", CPU->id, THREAD->tid, to);
rc = rwlock_read_lock_timeout(&rwlock, to);
if (SYNCH_FAILED(rc)) {
if (!sh_quiet)
printf("cpu%d, tid %llu r!\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 " r!\n", CPU->id, THREAD->tid);
atomic_dec(&thread_count);
return;
}
if (!sh_quiet)
printf("cpu%d, tid %llu r=\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 " r=\n", CPU->id, THREAD->tid);
thread_usleep(30000);
rwlock_read_unlock(&rwlock);
if (!sh_quiet)
printf("cpu%d, tid %llu r-\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 " r-\n", CPU->id, THREAD->tid);
atomic_dec(&thread_count);
}
 
159,8 → 159,8
context_save(&ctx);
if (!quiet) {
printf("sp=%#x, readers_in=%d\n", ctx.sp, rwlock.readers_in);
printf("Creating %d readers\n", rd);
printf("sp=%#x, readers_in=%" PRIc "\n", ctx.sp, rwlock.readers_in);
printf("Creating %" PRIu32 " readers\n", rd);
}
for (i = 0; i < rd; i++) {
168,11 → 168,11
if (thrd)
thread_ready(thrd);
else if (!quiet)
printf("Could not create reader %d\n", i);
printf("Could not create reader %" PRIu32 "\n", i);
}
 
if (!quiet)
printf("Creating %d writers\n", wr);
printf("Creating %" PRIu32 " writers\n", wr);
for (i = 0; i < wr; i++) {
thrd = thread_create(writer, NULL, TASK, 0, "writer", false);
179,7 → 179,7
if (thrd)
thread_ready(thrd);
else if (!quiet)
printf("Could not create writer %d\n", i);
printf("Could not create writer %" PRIu32 "\n", i);
}
thread_usleep(20000);
187,7 → 187,7
while (atomic_get(&thread_count) > 0) {
if (!quiet)
printf("Threads left: %d\n", atomic_get(&thread_count));
printf("Threads left: %ld\n", atomic_get(&thread_count));
thread_sleep(1);
}
/branches/tracing/kernel/test/synch/semaphore2.c
67,18 → 67,18
waitq_sleep(&can_start);
to = random(20000);
printf("cpu%d, tid %llu down+ (%d)\n", CPU->id, THREAD->tid, to);
printf("cpu%u, tid %" PRIu64 " down+ (%d)\n", CPU->id, THREAD->tid, to);
rc = semaphore_down_timeout(&sem, to);
if (SYNCH_FAILED(rc)) {
printf("cpu%d, tid %llu down!\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 " down!\n", CPU->id, THREAD->tid);
return;
}
printf("cpu%d, tid %llu down=\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 " down=\n", CPU->id, THREAD->tid);
thread_usleep(random(30000));
semaphore_up(&sem);
printf("cpu%d, tid %llu up\n", CPU->id, THREAD->tid);
printf("cpu%u, tid %" PRIu64 " up\n", CPU->id, THREAD->tid);
}
 
char * test_semaphore2(bool quiet)
91,7 → 91,7
thread_t *thrd;
k = random(7) + 1;
printf("Creating %d consumers\n", k);
printf("Creating %" PRIu32 " consumers\n", k);
for (i = 0; i < k; i++) {
thrd = thread_create(consumer, NULL, TASK, 0, "consumer", false);
if (thrd)
/branches/tracing/kernel/test/synch/rwlock5.c
108,7 → 108,7
waitq_wakeup(&can_start, WAKEUP_ALL);
while ((items_read.count != readers) || (items_written.count != writers)) {
printf("%zd readers remaining, %zd writers remaining, readers_in=%zd\n", readers - items_read.count, writers - items_written.count, rwlock.readers_in);
printf("%d readers remaining, %d writers remaining, readers_in=%d\n", readers - items_read.count, writers - items_written.count, rwlock.readers_in);
thread_usleep(100000);
}
}
/branches/tracing/kernel/test/test.h
36,12 → 36,13
#define KERN_TEST_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
typedef char * (* test_entry_t)(bool);
typedef char *(*test_entry_t)(bool);
 
typedef struct {
char * name;
char * desc;
char *name;
char *desc;
test_entry_t entry;
bool safe;
} test_t;
/branches/tracing/kernel/test/thread/thread1.c
48,7 → 48,7
 
while (atomic_get(&finish)) {
if (!sh_quiet)
printf("%llu ", THREAD->tid);
printf("%" PRIu64 " ", THREAD->tid);
thread_usleep(100000);
}
atomic_inc(&threads_finished);
/branches/tracing/kernel/test/mm/falloc2.c
55,10 → 55,10
uint8_t val = THREAD->tid % THREADS;
index_t k;
uintptr_t * frames = (uintptr_t *) malloc(MAX_FRAMES * sizeof(uintptr_t), FRAME_ATOMIC);
void **frames = (void **) malloc(MAX_FRAMES * sizeof(void *), FRAME_ATOMIC);
if (frames == NULL) {
if (!sh_quiet)
printf("Thread #%llu (cpu%d): Unable to allocate frames\n", THREAD->tid, CPU->id);
printf("Thread #%" PRIu64 " (cpu%u): Unable to allocate frames\n", THREAD->tid, CPU->id);
atomic_inc(&thread_fail);
atomic_dec(&thread_count);
return;
69,11 → 69,11
for (run = 0; run < THREAD_RUNS; run++) {
for (order = 0; order <= MAX_ORDER; order++) {
if (!sh_quiet)
printf("Thread #%llu (cpu%d): Allocating %d frames blocks ... \n", THREAD->tid, CPU->id, 1 << order);
printf("Thread #%" PRIu64 " (cpu%u): Allocating %d frames blocks ... \n", THREAD->tid, CPU->id, 1 << order);
allocated = 0;
for (i = 0; i < (MAX_FRAMES >> order); i++) {
frames[allocated] = (uintptr_t)frame_alloc(order, FRAME_ATOMIC | FRAME_KA);
frames[allocated] = frame_alloc(order, FRAME_ATOMIC | FRAME_KA);
if (frames[allocated]) {
memsetb(frames[allocated], FRAME_SIZE << order, val);
allocated++;
82,16 → 82,16
}
if (!sh_quiet)
printf("Thread #%llu (cpu%d): %d blocks allocated.\n", THREAD->tid, CPU->id, allocated);
printf("Thread #%" PRIu64 " (cpu%u): %d blocks allocated.\n", THREAD->tid, CPU->id, allocated);
if (!sh_quiet)
printf("Thread #%llu (cpu%d): Deallocating ... \n", THREAD->tid, CPU->id);
printf("Thread #%" PRIu64 " (cpu%u): Deallocating ... \n", THREAD->tid, CPU->id);
for (i = 0; i < allocated; i++) {
for (k = 0; k <= (((index_t) FRAME_SIZE << order) - 1); k++) {
if (((uint8_t *) frames[i])[k] != val) {
if (!sh_quiet)
printf("Thread #%llu (cpu%d): Unexpected data (%d) in block %p offset %#zx\n", THREAD->tid, CPU->id, ((char *) frames[i])[k], frames[i], k);
printf("Thread #%" PRIu64 " (cpu%u): Unexpected data (%c) in block %p offset %#" PRIi "\n", THREAD->tid, CPU->id, ((char *) frames[i])[k], frames[i], k);
atomic_inc(&thread_fail);
goto cleanup;
}
100,7 → 100,7
}
if (!sh_quiet)
printf("Thread #%llu (cpu%d): Finished run.\n", THREAD->tid, CPU->id);
printf("Thread #%" PRIu64 " (cpu%u): Finished run.\n", THREAD->tid, CPU->id);
}
}
 
108,7 → 108,7
free(frames);
if (!sh_quiet)
printf("Thread #%llu (cpu%d): Exiting\n", THREAD->tid, CPU->id);
printf("Thread #%" PRIu64 " (cpu%u): Exiting\n", THREAD->tid, CPU->id);
atomic_dec(&thread_count);
}
 
124,7 → 124,7
thread_t * thrd = thread_create(falloc, NULL, TASK, 0, "falloc", false);
if (!thrd) {
if (!quiet)
printf("Could not create thread %d\n", i);
printf("Could not create thread %u\n", i);
break;
}
thread_ready(thrd);
132,7 → 132,7
while (atomic_get(&thread_count) > 0) {
if (!quiet)
printf("Threads left: %d\n", atomic_get(&thread_count));
printf("Threads left: %ld\n", atomic_get(&thread_count));
thread_sleep(1);
}
/branches/tracing/kernel/test/mm/slab1.c
53,7 → 53,7
for (i = 0; i < count; i++) {
data[i] = slab_alloc(cache, 0);
memsetb((uintptr_t) data[i], size, 0);
memsetb(data[i], size, 0);
}
if (!quiet) {
71,7 → 71,7
for (i = 0; i < count; i++) {
data[i] = slab_alloc(cache, 0);
memsetb((uintptr_t) data[i], size, 0);
memsetb(data[i], size, 0);
}
if (!quiet) {
89,7 → 89,7
for (i = count / 2; i < count; i++) {
data[i] = slab_alloc(cache, 0);
memsetb((uintptr_t) data[i], size, 0);
memsetb(data[i], size, 0);
}
if (!quiet) {
137,7 → 137,7
thread_detach(THREAD);
if (!sh_quiet)
printf("Starting thread #%llu...\n", THREAD->tid);
printf("Starting thread #%" PRIu64 "...\n", THREAD->tid);
for (j = 0; j < 10; j++) {
for (i = 0; i < THR_MEM_COUNT; i++)
151,7 → 151,7
}
if (!sh_quiet)
printf("Thread #%llu finished\n", THREAD->tid);
printf("Thread #%" PRIu64 " finished\n", THREAD->tid);
semaphore_up(&thr_sem);
}
/branches/tracing/kernel/test/mm/slab2.c
68,8 → 68,8
slab_free(cache2, data2);
break;
}
memsetb((uintptr_t) data1, ITEM_SIZE, 0);
memsetb((uintptr_t) data2, ITEM_SIZE, 0);
memsetb(data1, ITEM_SIZE, 0);
memsetb(data2, ITEM_SIZE, 0);
*((void **) data1) = olddata1;
*((void **) data2) = olddata2;
olddata1 = data1;
100,7 → 100,7
printf("Incorrect memory size - use another test.");
return;
}
memsetb((uintptr_t) data1, ITEM_SIZE, 0);
memsetb(data1, ITEM_SIZE, 0);
*((void **) data1) = olddata1;
olddata1 = data1;
}
108,7 → 108,7
data1 = slab_alloc(cache1, FRAME_ATOMIC);
if (!data1)
break;
memsetb((uintptr_t) data1, ITEM_SIZE, 0);
memsetb(data1, ITEM_SIZE, 0);
*((void **) data1) = olddata1;
olddata1 = data1;
}
150,11 → 150,11
mutex_unlock(&starter_mutex);
if (!sh_quiet)
printf("Starting thread #%llu...\n",THREAD->tid);
printf("Starting thread #%" PRIu64 "...\n", THREAD->tid);
 
/* Alloc all */
if (!sh_quiet)
printf("Thread #%llu allocating...\n", THREAD->tid);
printf("Thread #%" PRIu64 " allocating...\n", THREAD->tid);
while (1) {
/* Call with atomic to detect end of memory */
166,7 → 166,7
}
if (!sh_quiet)
printf("Thread #%llu releasing...\n", THREAD->tid);
printf("Thread #%" PRIu64 " releasing...\n", THREAD->tid);
while (data) {
new = *((void **)data);
176,7 → 176,7
}
if (!sh_quiet)
printf("Thread #%llu allocating...\n", THREAD->tid);
printf("Thread #%" PRIu64 " allocating...\n", THREAD->tid);
while (1) {
/* Call with atomic to detect end of memory */
188,7 → 188,7
}
if (!sh_quiet)
printf("Thread #%llu releasing...\n", THREAD->tid);
printf("Thread #%" PRIu64 " releasing...\n", THREAD->tid);
while (data) {
new = *((void **)data);
198,7 → 198,7
}
if (!sh_quiet)
printf("Thread #%llu finished\n", THREAD->tid);
printf("Thread #%" PRIu64 " finished\n", THREAD->tid);
slab_print_list();
semaphore_up(&thr_sem);
/branches/tracing/kernel/test/fpu/mips2.c
72,7 → 72,7
if (arg != after_arg) {
if (!sh_quiet)
printf("General reg tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
printf("General reg tid%" PRIu64 ": arg(%d) != %d\n", THREAD->tid, arg, after_arg);
atomic_inc(&threads_fault);
break;
}
104,7 → 104,7
if (arg != after_arg) {
if (!sh_quiet)
printf("General reg tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
printf("General reg tid%" PRIu64 ": arg(%d) != %d\n", THREAD->tid, arg, after_arg);
atomic_inc(&threads_fault);
break;
}
123,7 → 123,7
atomic_set(&threads_fault, 0);
if (!quiet)
printf("Creating %d threads... ", 2 * THREADS);
printf("Creating %u threads... ", 2 * THREADS);
 
for (i = 0; i < THREADS; i++) {
thread_t *t;
130,7 → 130,7
if (!(t = thread_create(testit1, (void *) ((unative_t) 2 * i), TASK, 0, "testit1", false))) {
if (!quiet)
printf("could not create thread %d\n", 2 * i);
printf("could not create thread %u\n", 2 * i);
break;
}
thread_ready(t);
138,7 → 138,7
if (!(t = thread_create(testit2, (void *) ((unative_t) 2 * i + 1), TASK, 0, "testit2", false))) {
if (!quiet)
printf("could not create thread %d\n", 2 * i + 1);
printf("could not create thread %u\n", 2 * i + 1);
break;
}
thread_ready(t);
/branches/tracing/kernel/test/fpu/fpu1.c
126,7 → 126,7
 
if ((int) (100000000 * e) != E_10e8) {
if (!sh_quiet)
printf("tid%llu: e*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (100000000 * e), (unative_t) E_10e8);
printf("tid%" PRIu64 ": e*10e8=%zd should be %" PRIun "\n", THREAD->tid, (unative_t) (100000000 * e), (unative_t) E_10e8);
atomic_inc(&threads_fault);
break;
}
161,7 → 161,7
#ifdef KERN_ia64_ARCH_H_
if ((int) (1000000 * pi) != PI_10e8) {
if (!sh_quiet)
printf("tid%llu: pi*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (1000000 * pi), (unative_t) (PI_10e8 / 100));
printf("tid%" PRIu64 ": pi*10e8=%zd should be %" PRIun "\n", THREAD->tid, (unative_t) (1000000 * pi), (unative_t) (PI_10e8 / 100));
atomic_inc(&threads_fault);
break;
}
168,7 → 168,7
#else
if ((int) (100000000 * pi) != PI_10e8) {
if (!sh_quiet)
printf("tid%llu: pi*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (100000000 * pi), (unative_t) PI_10e8);
printf("tid%" PRIu64 ": pi*10e8=%zd should be %" PRIun "\n", THREAD->tid, (unative_t) (100000000 * pi), (unative_t) PI_10e8);
atomic_inc(&threads_fault);
break;
}
187,7 → 187,7
atomic_set(&threads_fault, 0);
if (!quiet)
printf("Creating %d threads... ", 2 * THREADS);
printf("Creating %u threads... ", 2 * THREADS);
 
for (i = 0; i < THREADS; i++) {
thread_t *t;
194,7 → 194,7
if (!(t = thread_create(e, NULL, TASK, 0, "e", false))) {
if (!quiet)
printf("could not create thread %d\n", 2 * i);
printf("could not create thread %u\n", 2 * i);
break;
}
thread_ready(t);
202,7 → 202,7
if (!(t = thread_create(pi, NULL, TASK, 0, "pi", false))) {
if (!quiet)
printf("could not create thread %d\n", 2 * i + 1);
printf("could not create thread %u\n", 2 * i + 1);
break;
}
thread_ready(t);
/branches/tracing/kernel/test/fpu/sse1.c
72,7 → 72,7
if (arg != after_arg) {
if (!sh_quiet)
printf("tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
printf("tid%" PRIu64 ": arg(%d) != %d\n", THREAD->tid, arg, after_arg);
atomic_inc(&threads_fault);
break;
}
104,7 → 104,7
if (arg != after_arg) {
if (!sh_quiet)
printf("tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
printf("tid%" PRIu64 ": arg(%d) != %d\n", THREAD->tid, arg, after_arg);
atomic_inc(&threads_fault);
break;
}
123,7 → 123,7
atomic_set(&threads_fault, 0);
if (!quiet)
printf("Creating %d threads... ", 2 * THREADS);
printf("Creating %u threads... ", 2 * THREADS);
 
for (i = 0; i < THREADS; i++) {
thread_t *t;
130,7 → 130,7
if (!(t = thread_create(testit1, (void *) ((unative_t) 2 * i), TASK, 0, "testit1", false))) {
if (!quiet)
printf("could not create thread %d\n", 2 * i);
printf("could not create thread %u\n", 2 * i);
break;
}
thread_ready(t);
138,7 → 138,7
if (!(t = thread_create(testit2, (void *) ((unative_t) 2 * i + 1), TASK, 0, "testit2", false))) {
if (!quiet)
printf("could not create thread %d\n", 2 * i + 1);
printf("could not create thread %u\n", 2 * i + 1);
break;
}
thread_ready(t);
/branches/tracing/kernel/test/print/print1.c
44,12 → 44,12
printf(" text 8.10s %8.10s \n", "text");
printf(" very long text 8.10s %8.10s \n", "very long text");
printf(" char: c '%c', 3.2c '%3.2c', -3.2c '%-3.2c', 2.3c '%2.3c', -2.3c '%-2.3c' \n",'a', 'b', 'c', 'd', 'e' );
printf(" int: d '%d', 3.2d '%3.2d', -3.2d '%-3.2d', 2.3d '%2.3d', -2.3d '%-2.3d' \n",1, 1, 1, 1, 1 );
printf(" -int: d '%d', 3.2d '%3.2d', -3.2d '%-3.2d', 2.3d '%2.3d', -2.3d '%-2.3d' \n",-1, -1, -1, -1, -1 );
printf(" 0xint: x '%#x', 5.3x '%#5.3x', -5.3x '%#-5.3x', 3.5x '%#3.5x', -3.5x '%#-3.5x' \n",17, 17, 17, 17, 17 );
printf(" char: c '%c', 3.2c '%3.2c', -3.2c '%-3.2c', 2.3c '%2.3c', -2.3c '%-2.3c' \n", 'a', 'b', 'c', 'd', 'e');
printf(" int: d '%d', 3.2d '%3.2d', -3.2d '%-3.2d', 2.3d '%2.3d', -2.3d '%-2.3d' \n", 1, 1, 1, 1, 1);
printf(" -int: d '%d', 3.2d '%3.2d', -3.2d '%-3.2d', 2.3d '%2.3d', -2.3d '%-2.3d' \n", -1, -1, -1, -1, -1);
printf(" 0xint: x '%#x', 5.3x '%#5.3x', -5.3x '%#-5.3x', 3.5x '%#3.5x', -3.5x '%#-3.5x' \n", 17, 17, 17, 17, 17);
printf("'%#llx' 64bit, '%#x' 32bit, '%#hhx' 8bit, '%#hx' 16bit, unative_t '%#zx'. '%#llx' 64bit and '%s' string.\n", 0x1234567887654321ll, 0x12345678, 0x12, 0x1234, nat, 0x1234567887654321ull, "Lovely string" );
printf("'%#llx' 64bit, '%#x' 32bit, '%#hhx' 8bit, '%#hx' 16bit, unative_t '%#" PRIxn "'. '%#llx' 64bit and '%s' string.\n", 0x1234567887654321ll, 0x12345678, 0x12, 0x1234, nat, 0x1234567887654321ull, "Lovely string" );
printf(" Print to NULL '%s'\n", NULL);
/branches/tracing/kernel/kernel.config
84,13 → 84,8
@ "indy" SGI Indy
! [ARCH=mips32] MACHINE (choice)
 
# Machine type
@ "gxemul_testarm" GXEmul testarm
! [ARCH=arm32] MACHINE (choice)
 
 
# Framebuffer support
! [(ARCH=mips32&MACHINE=lgxemul)|(ARCH=mips32&MACHINE=bgxemul)|(ARCH=ia32)|(ARCH=amd64)|(ARCH=arm32&MACHINE=gxemul_testarm)] CONFIG_FB (y/n)
! [(ARCH=mips32&MACHINE=lgxemul)|(ARCH=mips32&MACHINE=bgxemul)|(ARCH=ia32)|(ARCH=amd64)|(ARCH=arm32)] CONFIG_FB (y/n)
 
# Framebuffer width
@ "640"
156,6 → 151,9
# General debuging and assert checking
! CONFIG_DEBUG (y/n)
 
# Extensive debugging output
! [CONFIG_DEBUG=y] CONFIG_EDEBUG (n/y)
 
# Deadlock detection support for spinlocks
! [CONFIG_DEBUG=y&CONFIG_SMP=y] CONFIG_DEBUG_SPINLOCK (y/n)
 
/branches/tracing/kernel/genarch/include/ofw/ofw_tree.h
30,6 → 30,7
#define KERN_OFW_TREE_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
#define OFW_TREE_PROPERTY_MAX_NAMELEN 32
 
/branches/tracing/kernel/genarch/src/mm/page_pt.c
76,7 → 76,7
 
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = (pte_t *)frame_alloc(PTL1_SIZE, FRAME_KA);
memsetb((uintptr_t)newpt, FRAME_SIZE << PTL1_SIZE, 0);
memsetb(newpt, FRAME_SIZE << PTL1_SIZE, 0);
SET_PTL1_ADDRESS(ptl0, PTL0_INDEX(page), KA2PA(newpt));
SET_PTL1_FLAGS(ptl0, PTL0_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
}
85,7 → 85,7
 
if (GET_PTL2_FLAGS(ptl1, PTL1_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = (pte_t *)frame_alloc(PTL2_SIZE, FRAME_KA);
memsetb((uintptr_t)newpt, FRAME_SIZE << PTL2_SIZE, 0);
memsetb(newpt, FRAME_SIZE << PTL2_SIZE, 0);
SET_PTL2_ADDRESS(ptl1, PTL1_INDEX(page), KA2PA(newpt));
SET_PTL2_FLAGS(ptl1, PTL1_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
}
94,7 → 94,7
 
if (GET_PTL3_FLAGS(ptl2, PTL2_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = (pte_t *)frame_alloc(PTL3_SIZE, FRAME_KA);
memsetb((uintptr_t)newpt, FRAME_SIZE << PTL3_SIZE, 0);
memsetb(newpt, FRAME_SIZE << PTL3_SIZE, 0);
SET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page), KA2PA(newpt));
SET_PTL3_FLAGS(ptl2, PTL2_INDEX(page), PAGE_PRESENT | PAGE_USER | PAGE_EXEC | PAGE_CACHEABLE | PAGE_WRITE);
}
146,7 → 146,7
ptl3 = (pte_t *) PA2KA(GET_PTL3_ADDRESS(ptl2, PTL2_INDEX(page)));
 
/* Destroy the mapping. Setting to PAGE_NOT_PRESENT is not sufficient. */
memsetb((uintptr_t) &ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);
memsetb(&ptl3[PTL3_INDEX(page)], sizeof(pte_t), 0);
 
/*
* Second, free all empty tables along the way from PTL3 down to PTL0.
166,11 → 166,11
*/
frame_free(KA2PA((uintptr_t) ptl3));
if (PTL2_ENTRIES)
memsetb((uintptr_t) &ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
memsetb(&ptl2[PTL2_INDEX(page)], sizeof(pte_t), 0);
else if (PTL1_ENTRIES)
memsetb((uintptr_t) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
else
memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
} else {
/*
* PTL3 is not empty.
195,9 → 195,9
*/
frame_free(KA2PA((uintptr_t) ptl2));
if (PTL1_ENTRIES)
memsetb((uintptr_t) &ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
memsetb(&ptl1[PTL1_INDEX(page)], sizeof(pte_t), 0);
else
memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
}
else {
/*
223,7 → 223,7
* Release the frame and remove PTL1 pointer from preceding table.
*/
frame_free(KA2PA((uintptr_t) ptl1));
memsetb((uintptr_t) &ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
memsetb(&ptl0[PTL0_INDEX(page)], sizeof(pte_t), 0);
}
}
 
/branches/tracing/kernel/genarch/src/mm/as_pt.c
103,7 → 103,7
table_size = FRAME_SIZE << PTL0_SIZE;
 
if (flags & FLAG_AS_KERNEL) {
memsetb((uintptr_t) dst_ptl0, table_size, 0);
memsetb(dst_ptl0, table_size, 0);
} else {
uintptr_t src, dst;
118,7 → 118,7
src = (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
dst = (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
 
memsetb((uintptr_t) dst_ptl0, table_size, 0);
memsetb(dst_ptl0, table_size, 0);
memcpy((void *) dst, (void *) src, table_size - (src - (uintptr_t) src_ptl0));
mutex_unlock(&AS_KERNEL->lock);
interrupts_restore(ipl);
/branches/tracing/kernel/genarch/src/acpi/madt.c
126,7 → 126,7
 
int madt_irq_to_pin(unsigned int irq)
{
ASSERT(irq < sizeof(isa_irq_map)/sizeof(int));
ASSERT(irq < sizeof(isa_irq_map) / sizeof(int));
return isa_irq_map[irq];
}
 
184,15 → 184,15
case MADT_IO_SAPIC:
case MADT_L_SAPIC:
case MADT_PLATFORM_INTR_SRC:
printf("MADT: skipping %s entry (type=%zd)\n", entry[h->type], h->type);
printf("MADT: skipping %s entry (type=%" PRIu8 ")\n", entry[h->type], h->type);
break;
default:
if (h->type >= MADT_RESERVED_SKIP_BEGIN && h->type <= MADT_RESERVED_SKIP_END) {
printf("MADT: skipping reserved entry (type=%zd)\n", h->type);
printf("MADT: skipping reserved entry (type=%" PRIu8 ")\n", h->type);
}
if (h->type >= MADT_RESERVED_OEM_BEGIN) {
printf("MADT: skipping OEM entry (type=%zd)\n", h->type);
printf("MADT: skipping OEM entry (type=%" PRIu8 ")\n", h->type);
}
break;
}
233,8 → 233,8
 
void madt_intr_src_ovrd_entry(struct madt_intr_src_ovrd *override, uint32_t index)
{
ASSERT(override->source < sizeof(isa_irq_map)/sizeof(int));
printf("MADT: ignoring %s entry: bus=%zd, source=%zd, global_int=%zd, flags=%#hx\n",
ASSERT(override->source < sizeof(isa_irq_map) / sizeof(int));
printf("MADT: ignoring %s entry: bus=%" PRIu8 ", source=%" PRIu8 ", global_int=%" PRIu32 ", flags=%#" PRIx16 "\n",
entry[override->header.type], override->bus, override->source,
override->global_int, override->flags);
}
/branches/tracing/kernel/genarch/src/acpi/acpi.c
105,7 → 105,7
if (!acpi_sdt_check((uint8_t *) h))
goto next;
*signature_map[j].sdt_ptr = h;
printf("%#zp: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description);
printf("%p: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description);
}
}
next:
126,7 → 126,7
if (!acpi_sdt_check((uint8_t *) h))
goto next;
*signature_map[j].sdt_ptr = h;
printf("%#zp: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description);
printf("%p: ACPI %s\n", *signature_map[j].sdt_ptr, signature_map[j].description);
}
}
next:
160,7 → 160,7
return;
 
rsdp_found:
printf("%#zp: ACPI Root System Description Pointer\n", acpi_rsdp);
printf("%p: ACPI Root System Description Pointer\n", acpi_rsdp);
 
acpi_rsdt = (struct acpi_rsdt *) (unative_t) acpi_rsdp->rsdt_address;
if (acpi_rsdp->revision) acpi_xsdt = (struct acpi_xsdt *) ((uintptr_t) acpi_rsdp->xsdt_address);
169,11 → 169,11
if (acpi_xsdt) map_sdt((struct acpi_sdt_header *) acpi_xsdt);
 
if (acpi_rsdt && !acpi_sdt_check((uint8_t *) acpi_rsdt)) {
printf("RSDT: %s\n", "bad checksum");
printf("RSDT: bad checksum\n");
return;
}
if (acpi_xsdt && !acpi_sdt_check((uint8_t *) acpi_xsdt)) {
printf("XSDT: %s\n", "bad checksum");
printf("XSDT: bad checksum\n");
return;
}
 
/branches/tracing/kernel/generic/include/stackarg.h
52,9 → 52,9
(ap).last = (uint8_t *) &(lst)
 
#define va_arg(ap, type) \
(*((type *)((ap).last + ((ap).pos += sizeof(type) ) - sizeof(type))))
(*((type *)((ap).last + ((ap).pos += sizeof(type)) - sizeof(type))))
 
#define va_copy(dst,src) dst=src
#define va_copy(dst, src) dst = src
#define va_end(ap)
 
 
/branches/tracing/kernel/generic/include/proc/task.h
129,7 → 129,6
extern void task_done(void);
extern task_t *task_create(as_t *as, char *name);
extern void task_destroy(task_t *t);
extern task_t *task_run_program(void *program_addr, char *name);
extern task_t *task_find_by_id(task_id_t id);
extern int task_kill(task_id_t id);
extern uint64_t task_get_accounting(task_t *t);
146,6 → 145,7
#endif
 
extern unative_t sys_task_get_id(task_id_t *uspace_task_id);
extern unative_t sys_task_spawn(void *image, size_t size);
 
#endif
 
/branches/tracing/kernel/generic/include/proc/thread.h
252,6 → 252,8
extern void thread_update_accounting(void);
extern bool thread_exists(thread_t *t);
 
extern thread_t *thread_create_program(void *program_addr, char *name);
 
/** Fpu context slab cache. */
extern slab_cache_t *fpu_context_slab;
 
/branches/tracing/kernel/generic/include/debug.h
38,11 → 38,11
#include <panic.h>
#include <arch/debug.h>
 
#define CALLER ((uintptr_t)__builtin_return_address(0))
#define CALLER ((uintptr_t) __builtin_return_address(0))
 
#ifndef HERE
/** Current Instruction Pointer address */
# define HERE ((uintptr_t *) 0)
# define HERE ((uintptr_t *) 0)
#endif
 
/** Debugging ASSERT macro
55,12 → 55,51
*
*/
#ifdef CONFIG_DEBUG
# define ASSERT(expr) if (!(expr)) { panic("assertion failed (%s), caller=%.*p\n", #expr, sizeof(uintptr_t) * 2, CALLER); }
# define ASSERT(expr) \
if (!(expr)) { \
panic("assertion failed (%s), caller=%p\n", #expr, CALLER); \
}
#else
# define ASSERT(expr)
#endif
 
/** Extensive debugging output macro
*
* If CONFIG_EDEBUG is set, the LOG() macro
* will print the indicated message plus
* information about its location.
*
*/
 
#ifdef CONFIG_EDEBUG
# define LOG(format, ...) \
printf("%s() at %s:%u: " format "\n", __func__, __FILE__, \
__LINE__, ##__VA_ARGS__);
#else
# define LOG(format, ...)
#endif
 
/** Extensive debugging execute macro
*
* If CONFIG_EDEBUG is set, the LOG_EXEC() macro
* will print information about calling the given
* function and then call it.
*
*/
 
#ifdef CONFIG_EDEBUG
# define LOG_EXEC(fnc) \
{ \
printf("%s() at %s:%u: " #fnc "\n", __func__, __FILE__, \
__LINE__); \
fnc; \
}
#else
# define LOG_EXEC(fnc) fnc
#endif
 
 
#endif
 
/** @}
*/
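The ASSERT, LOG and LOG_EXEC macros above are only active when CONFIG_DEBUG and CONFIG_EDEBUG are enabled. A self-contained sketch of how the two new logging macros behave; only the macro bodies mirror the header, the stub function and the call sites are made up for illustration:

/* Standalone demo of the LOG()/LOG_EXEC() shape from debug.h. */
#include <stdio.h>

#define LOG(format, ...) \
	printf("%s() at %s:%u: " format "\n", __func__, __FILE__, \
	    __LINE__, ##__VA_ARGS__)

#define LOG_EXEC(fnc) \
	{ \
		printf("%s() at %s:%u: " #fnc "\n", __func__, __FILE__, \
		    __LINE__); \
		fnc; \
	}

static void frame_init_stub(void)	/* stands in for a kernel init routine */
{
	printf("frame allocator initialized\n");
}

int main(void)
{
	LOG("free memory: %d MB", 512);	/* message plus its source location */
	LOG_EXEC(frame_init_stub());	/* log the call, then perform it */
	return 0;
}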
/branches/tracing/kernel/generic/include/panic.h
36,15 → 36,15
#define KERN_PANIC_H_
 
#ifdef CONFIG_DEBUG
#define panic(format, ...) \
panic_printf("Kernel panic in %s() at %s on line %d: " format, __func__, \
__FILE__, __LINE__, ##__VA_ARGS__);
# define panic(format, ...) \
panic_printf("Kernel panic in %s() at %s:%u: " format, __func__, \
__FILE__, __LINE__, ##__VA_ARGS__);
#else
#define panic(format, ...) \
panic_printf("Kernel panic: " format, ##__VA_ARGS__);
# define panic(format, ...) \
panic_printf("Kernel panic: " format, ##__VA_ARGS__);
#endif
 
extern void panic_printf(char *fmt, ...) __attribute__((noreturn)) ;
extern void panic_printf(char *fmt, ...) __attribute__((noreturn));
 
#endif
 
/branches/tracing/kernel/generic/include/interrupt.h
40,7 → 40,6
#include <proc/task.h>
#include <proc/thread.h>
#include <arch.h>
#include <console/klog.h>
#include <ddi/irq.h>
 
typedef void (* iroutine)(int n, istate_t *istate);
49,8 → 48,8
{ \
if (istate_from_uspace(istate)) { \
task_t *task = TASK; \
klog_printf("Task %llu killed due to an exception at %p.", task->taskid, istate_get_pc(istate)); \
klog_printf(" " cmd, ##__VA_ARGS__); \
printf("Task %" PRIu64 " killed due to an exception at %p.", task->taskid, istate_get_pc(istate)); \
printf(" " cmd, ##__VA_ARGS__); \
task_kill(task->taskid); \
thread_exit(); \
} \
/branches/tracing/kernel/generic/include/synch/spinlock.h
110,8 → 110,8
#define DEADLOCK_PROBE(pname, value) \
if ((pname)++ > (value)) { \
(pname) = 0; \
printf("Deadlock probe %s: exceeded threshold %d\n", \
"cpu%d: function=%s, line=%d\n", \
printf("Deadlock probe %s: exceeded threshold %u\n", \
"cpu%u: function=%s, line=%u\n", \
#pname, (value), CPU->id, __func__, __LINE__); \
}
#else
/branches/tracing/kernel/generic/include/memstr.h
42,8 → 42,8
* Architecture independent variants.
*/
extern void *_memcpy(void *dst, const void *src, size_t cnt);
extern void _memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void _memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void _memsetb(void *dst, size_t cnt, uint8_t x);
extern void _memsetw(void *dst, size_t cnt, uint16_t x);
extern char *strcpy(char *dest, const char *src);
 
#endif
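The prototypes above change the destination argument of _memsetb()/_memsetw() from uintptr_t to void *, which is why the (uintptr_t) casts disappear from the call sites in the slab, page table and CPU code earlier in this diff. A tiny standalone illustration of the call-site effect; the function body here is a stand-in, not the kernel's architecture-specific implementation:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-in with the new signature: destination is void *, not uintptr_t. */
static void memsetb(void *dst, size_t cnt, uint8_t x)
{
	uint8_t *p = dst;
	while (cnt-- > 0)
		*p++ = x;
}

int main(void)
{
	uint8_t frame[16];

	/* Old prototype forced a cast: memsetb((uintptr_t) frame, sizeof(frame), 0); */
	memsetb(frame, sizeof(frame), 0);	/* any object pointer converts implicitly */
	printf("%d\n", frame[0]);		/* prints 0 */
	return 0;
}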
/branches/tracing/kernel/generic/include/ddi/device.h
35,6 → 35,9
#ifndef KERN_DEVICE_H_
#define KERN_DEVICE_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
extern devno_t device_assign_devno(void);
 
#endif
/branches/tracing/kernel/generic/include/console/klog.h
File deleted
/branches/tracing/kernel/generic/include/console/console.h
41,6 → 41,9
extern chardev_t *stdin;
extern chardev_t *stdout;
 
extern void klog_init(void);
extern void klog_update(void);
 
extern uint8_t getc(chardev_t *chardev);
uint8_t _getc(chardev_t *chardev);
extern count_t gets(chardev_t *chardev, char *buf, size_t buflen);
/branches/tracing/kernel/generic/include/adt/list.h
36,6 → 36,7
#define KERN_LIST_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
/** Doubly linked list head and link type. */
typedef struct link {
/branches/tracing/kernel/generic/include/adt/avl.h
36,6 → 36,7
#define KERN_AVLTREE_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
/**
* Macro for getting a pointer to the structure which contains the avltree
/branches/tracing/kernel/generic/include/typedefs.h
35,8 → 35,22
#ifndef KERN_TYPEDEFS_H_
#define KERN_TYPEDEFS_H_
 
#include <arch/types.h>
 
#define NULL 0
#define false 0
#define true 1
 
typedef void (* function)();
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
typedef int32_t inr_t;
typedef int32_t devno_t;
 
#endif
 
/** @}
/branches/tracing/kernel/generic/include/syscall/syscall.h
36,17 → 36,23
#define KERN_SYSCALL_H_
 
typedef enum {
SYS_IO = 0,
SYS_KLOG = 0,
SYS_TLS_SET = 1, /* Hardcoded in AMD64, IA32 uspace - fibril.S */
SYS_THREAD_CREATE,
SYS_THREAD_EXIT,
SYS_THREAD_GET_ID,
SYS_TASK_GET_ID,
SYS_TASK_SPAWN,
SYS_FUTEX_SLEEP,
SYS_FUTEX_WAKEUP,
SYS_AS_AREA_CREATE,
SYS_AS_AREA_RESIZE,
SYS_AS_AREA_DESTROY,
SYS_IPC_CALL_SYNC_FAST,
SYS_IPC_CALL_SYNC_SLOW,
SYS_IPC_CALL_ASYNC_FAST,
58,13 → 64,17
SYS_IPC_HANGUP,
SYS_IPC_REGISTER_IRQ,
SYS_IPC_UNREGISTER_IRQ,
SYS_CAP_GRANT,
SYS_CAP_REVOKE,
SYS_PHYSMEM_MAP,
SYS_IOSPACE_ENABLE,
SYS_PREEMPT_CONTROL,
SYS_SYSINFO_VALID,
SYS_SYSINFO_VALUE,
SYS_DEBUG_ENABLE_CONSOLE,
SYS_IPC_CONNECT_KBOX,
SYSCALL_END
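Several syscalls (SYS_TASK_SPAWN among them) are inserted into the middle of the enumeration rather than appended before SYSCALL_END, so the numeric values of the entries after each insertion point shift and kernel and userspace presumably have to be rebuilt together; the header notes that only SYS_TLS_SET is hard-coded in the uspace fibril.S. A toy illustration of the renumbering effect, using abbreviated stand-ins rather than the real tables:

#include <stdio.h>

/* Abbreviated before/after versions of the syscall enumeration. */
enum syscall_old { O_SYS_KLOG = 0, O_SYS_TLS_SET, O_SYS_THREAD_CREATE,
    O_SYS_THREAD_EXIT, O_SYS_THREAD_GET_ID, O_SYS_TASK_GET_ID, O_SYS_FUTEX_SLEEP };
enum syscall_new { N_SYS_KLOG = 0, N_SYS_TLS_SET, N_SYS_THREAD_CREATE,
    N_SYS_THREAD_EXIT, N_SYS_THREAD_GET_ID, N_SYS_TASK_GET_ID, N_SYS_TASK_SPAWN,
    N_SYS_FUTEX_SLEEP };

int main(void)
{
	/* The same logical call gets a different number once entries are inserted. */
	printf("SYS_FUTEX_SLEEP: old=%d new=%d\n",
	    (int) O_SYS_FUTEX_SLEEP, (int) N_SYS_FUTEX_SLEEP);
	return 0;
}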
/branches/tracing/kernel/generic/src/synch/rwlock.c
231,7 → 231,7
interrupts_restore(ipl);
break;
case ESYNCH_OK_ATOMIC:
panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n");
panic("_mutex_lock_timeout() == ESYNCH_OK_ATOMIC\n");
break;
default:
panic("invalid ESYNCH\n");
/branches/tracing/kernel/generic/src/synch/spinlock.c
106,9 → 106,8
continue;
#endif
if (i++ > DEADLOCK_THRESHOLD) {
printf("cpu%d: looping on spinlock %.*p:%s, "
"caller=%.*p", CPU->id, sizeof(uintptr_t) * 2, sl,
sl->name, sizeof(uintptr_t) * 2, CALLER);
printf("cpu%u: looping on spinlock %" PRIp ":%s, caller=%" PRIp,
CPU->id, sl, sl->name, CALLER);
symbol = get_symtab_entry(CALLER);
if (symbol)
printf("(%s)", symbol);
119,7 → 118,7
}
 
if (deadlock_reported)
printf("cpu%d: not deadlocked\n", CPU->id);
printf("cpu%u: not deadlocked\n", CPU->id);
 
/*
* Prevent critical section code from bleeding out this way up.
/branches/tracing/kernel/generic/src/main/kinit.c
153,40 → 153,50
panic("thread_create/kconsole\n");
 
interrupts_enable();
 
/*
* Create user tasks, load RAM disk images.
*/
count_t i;
thread_t *threads[CONFIG_INIT_TASKS];
for (i = 0; i < init.cnt; i++) {
/*
* Run user tasks, load RAM disk images.
*/
if (init.tasks[i].addr % FRAME_SIZE) {
printf("init[%d].addr is not frame aligned", i);
printf("init[%" PRIc "].addr is not frame aligned", i);
continue;
}
 
task_t *utask = task_run_program((void *) init.tasks[i].addr,
"uspace");
threads[i] = thread_create_program(
(void *) init.tasks[i].addr, "uspace");
if (utask) {
if (threads[i] != NULL) {
/*
* Set capabilities to init userspace tasks.
*/
cap_set(utask, CAP_CAP | CAP_MEM_MANAGER |
cap_set(threads[i]->task, CAP_CAP | CAP_MEM_MANAGER |
CAP_IO_MANAGER | CAP_PREEMPT_CONTROL | CAP_IRQ_REG);
if (!ipc_phone_0)
ipc_phone_0 = &utask->answerbox;
if (!ipc_phone_0)
ipc_phone_0 = &threads[i]->task->answerbox;
} else {
int rd = init_rd((rd_header_t *) init.tasks[i].addr,
init.tasks[i].size);
if (rd != RE_OK)
printf("Init binary %zd not used, error code %d.\n", i, rd);
printf("Init binary %" PRIc " not used, error code %d.\n", i, rd);
}
}
/*
* Run user tasks with reasonable delays
*/
for (i = 0; i < init.cnt; i++) {
if (threads[i] != NULL) {
thread_usleep(50000);
thread_ready(threads[i]);
}
}
 
 
if (!stdin) {
while (1) {
thread_sleep(1);
/branches/tracing/kernel/generic/src/main/main.c
78,9 → 78,9
#include <ipc/ipc.h>
#include <macros.h>
#include <adt/btree.h>
#include <console/klog.h>
#include <smp/smp.h>
#include <ddi/ddi.h>
#include <console/console.h>
 
/** Global configuration structure. */
config_t config;
131,9 → 131,11
 
/** Main kernel routine for bootstrap CPU.
*
* Initializes the kernel by bootstrap CPU.
* This function passes control directly to
* main_bsp_separated_stack().
* The code here still runs on the boot stack, which knows nothing about
* preemption counts. Because of that, this function cannot directly call
* functions that disable or enable preemption (e.g. spinlock_lock()). The
* primary task of this function is to calculate the address of a new stack
* and switch to it.
*
* Assuming interrupts_disable().
*
186,90 → 188,93
*/
void main_bsp_separated_stack(void)
{
task_t *k;
thread_t *t;
count_t i;
/* Keep this the first thing. */
the_initialize(THE);
 
LOG();
version_print();
LOG("\nconfig.base=%#" PRIp " config.kernel_size=%" PRIs
"\nconfig.stack_base=%#" PRIp " config.stack_size=%" PRIs,
config.base, config.kernel_size, config.stack_base,
config.stack_size);
 
/*
* kconsole data structures must be initialized very early
* because other subsystems will register their respective
* commands.
*/
kconsole_init();
LOG_EXEC(kconsole_init());
/*
* Exception handler initialization, before architecture
* starts adding its own handlers
*/
exc_init();
LOG_EXEC(exc_init());
 
/*
* Memory management subsystems initialization.
*/
arch_pre_mm_init();
frame_init();
*/
LOG_EXEC(arch_pre_mm_init());
LOG_EXEC(frame_init());
/* Initialize at least 1 memory segment big enough for slab to work. */
slab_cache_init();
btree_init();
as_init();
page_init();
tlb_init();
ddi_init();
tasklet_init();
arch_post_mm_init();
LOG_EXEC(slab_cache_init());
LOG_EXEC(btree_init());
LOG_EXEC(as_init());
LOG_EXEC(page_init());
LOG_EXEC(tlb_init());
LOG_EXEC(ddi_init());
LOG_EXEC(tasklet_init());
LOG_EXEC(arch_post_mm_init());
LOG_EXEC(arch_pre_smp_init());
LOG_EXEC(smp_init());
version_print();
printf("kernel: %.*p hardcoded_ktext_size=%zd KB, "
"hardcoded_kdata_size=%zd KB\n", sizeof(uintptr_t) * 2,
config.base, SIZE2KB(hardcoded_ktext_size),
SIZE2KB(hardcoded_kdata_size));
printf("stack: %.*p size=%zd KB\n", sizeof(uintptr_t) * 2,
config.stack_base, SIZE2KB(config.stack_size));
arch_pre_smp_init();
smp_init();
/* Slab must be initialized after we know the number of processors. */
slab_enable_cpucache();
LOG_EXEC(slab_enable_cpucache());
printf("Detected %zu CPU(s), %llu MB free memory\n",
printf("Detected %" PRIc " CPU(s), %" PRIu64" MB free memory\n",
config.cpu_count, SIZE2MB(zone_total_size()));
cpu_init();
calibrate_delay_loop();
clock_counter_init();
timeout_init();
scheduler_init();
task_init();
thread_init();
futex_init();
klog_init();
LOG_EXEC(cpu_init());
LOG_EXEC(calibrate_delay_loop());
LOG_EXEC(clock_counter_init());
LOG_EXEC(timeout_init());
LOG_EXEC(scheduler_init());
LOG_EXEC(task_init());
LOG_EXEC(thread_init());
LOG_EXEC(futex_init());
if (init.cnt > 0) {
count_t i;
for (i = 0; i < init.cnt; i++)
printf("init[%zd].addr=%.*p, init[%zd].size=%zd\n", i,
sizeof(uintptr_t) * 2, init.tasks[i].addr, i,
init.tasks[i].size);
printf("init[%" PRIc "].addr=%#" PRIp ", init[%" PRIc
"].size=%#" PRIs "\n", i, init.tasks[i].addr,
i, init.tasks[i].size);
} else
printf("No init binaries found\n");
ipc_init();
LOG_EXEC(ipc_init());
LOG_EXEC(klog_init());
 
/*
* Create kernel task.
*/
k = task_create(AS_KERNEL, "kernel");
if (!k)
panic("can't create kernel task\n");
task_t *kernel = task_create(AS_KERNEL, "kernel");
if (!kernel)
panic("Can't create kernel task\n");
/*
* Create the first thread.
*/
t = thread_create(kinit, NULL, k, 0, "kinit", true);
if (!t)
panic("can't create kinit thread\n");
thread_ready(t);
thread_t *kinit_thread = thread_create(kinit, NULL, kernel, 0, "kinit",
true);
if (!kinit_thread)
panic("Can't create kinit thread\n");
LOG_EXEC(thread_ready(kinit_thread));
/*
* This call to scheduler() will return to kinit,
/branches/tracing/kernel/generic/src/debug/symtab.c
39,6 → 39,8
#include <byteorder.h>
#include <func.h>
#include <print.h>
#include <arch/types.h>
#include <typedefs.h>
 
/** Return entry that seems most likely to correspond to argument.
*
139,7 → 141,7
while (symtab_search_one(name, &i)) {
addr = uint64_t_le2host(symbol_table[i].address_le);
realname = symbol_table[i].symbol_name;
printf("%.*p: %s\n", sizeof(uintptr_t) * 2, addr, realname);
printf("%p: %s\n", addr, realname);
i++;
}
}
/branches/tracing/kernel/generic/src/cpu/cpu.c
67,7 → 67,7
panic("malloc/cpus");
 
/* initialize everything */
memsetb((uintptr_t) cpus, sizeof(cpu_t) * config.cpu_count, 0);
memsetb(cpus, sizeof(cpu_t) * config.cpu_count, 0);
 
for (i = 0; i < config.cpu_count; i++) {
cpus[i].stack = (uint8_t *) frame_alloc(STACK_FRAMES, FRAME_KA | FRAME_ATOMIC);
104,7 → 104,7
if (cpus[i].active)
cpu_print_report(&cpus[i]);
else
printf("cpu%d: not active\n", i);
printf("cpu%u: not active\n", i);
}
}
 
/branches/tracing/kernel/generic/src/sysinfo/sysinfo.c
177,7 → 177,7
sysinfo_item_t *item = sysinfo_create_path(name, root);
if (item != NULL) { /* If in subsystem, unable to create or return so unable to set */
item->val.val=val;
item->val.val = val;
item->val_type = SYSINFO_VAL_VAL;
}
}
192,7 → 192,7
sysinfo_item_t *item = sysinfo_create_path(name, root);
if (item != NULL) { /* If in subsystem, unable to create or return so unable to set */
item->val.fn=fn;
item->val.fn = fn;
item->val_type = SYSINFO_VAL_FUNCTION;
}
}
244,7 → 244,7
break;
}
printf("%s %s val:%d(%x) sub:%s\n", root->name, vtype, val,
printf("%s %s val:%" PRIun "(%" PRIxn ") sub:%s\n", root->name, vtype, val,
val, (root->subinfo_type == SYSINFO_SUBINFO_NONE) ?
"NON" : ((root->subinfo_type == SYSINFO_SUBINFO_TABLE) ?
"TAB" : "FUN"));
/branches/tracing/kernel/generic/src/interrupt/interrupt.c
113,27 → 113,32
char *symbol;
 
spinlock_lock(&exctbl_lock);
 
#ifdef __32_BITS__
printf("Exc Description Handler Symbol\n");
printf("--- -------------------- ---------- --------\n");
#endif
 
#ifdef __64_BITS__
printf("Exc Description Handler Symbol\n");
printf("--- -------------------- ------------------ --------\n");
#endif
if (sizeof(void *) == 4) {
printf("Exc Description Handler Symbol\n");
printf("--- -------------------- ---------- --------\n");
} else {
printf("Exc Description Handler Symbol\n");
printf("--- -------------------- ------------------ --------\n");
}
for (i = 0; i < IVT_ITEMS; i++) {
symbol = get_symtab_entry((unative_t) exc_table[i].f);
if (!symbol)
symbol = "not found";
 
#ifdef __32_BITS__
printf("%-3u %-20s %10p %s\n", i + IVT_FIRST, exc_table[i].name,
exc_table[i].f, symbol);
#endif
 
#ifdef __64_BITS__
printf("%-3u %-20s %18p %s\n", i + IVT_FIRST, exc_table[i].name,
exc_table[i].f, symbol);
#endif
if (sizeof(void *) == 4)
printf("%-3u %-20s %#10zx %s\n", i + IVT_FIRST, exc_table[i].name,
exc_table[i].f, symbol);
else
printf("%-3u %-20s %#18zx %s\n", i + IVT_FIRST, exc_table[i].name,
exc_table[i].f, symbol);
if (((i + 1) % 20) == 0) {
printf(" -- Press any key to continue -- ");
spinlock_unlock(&exctbl_lock);
163,7 → 168,7
{
int i;
 
for (i=0;i < IVT_ITEMS; i++)
for (i = 0; i < IVT_ITEMS; i++)
exc_register(i, "undef", (iroutine) exc_undef);
 
cmd_initialize(&exc_info);
/branches/tracing/kernel/generic/src/printf/vprintf.c
37,6 → 37,8
#include <putchar.h>
#include <synch/spinlock.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <typedefs.h>
 
SPINLOCK_INITIALIZE(printf_lock); /**< vprintf spinlock */
 
/branches/tracing/kernel/generic/src/printf/printf_core.c
75,7 → 75,6
PrintfQualifierInt,
PrintfQualifierLong,
PrintfQualifierLongLong,
PrintfQualifierNative,
PrintfQualifierPointer
} qualifier_t;
 
432,7 → 431,6
* - "" Signed or unsigned int (default value).@n
* - "l" Signed or unsigned long int.@n
* - "ll" Signed or unsigned long long int.@n
* - "z" unative_t (non-standard extension).@n
*
*
* CONVERSION:@n
486,7 → 484,7
while ((c = fmt[i])) {
/* control character */
if (c == '%' ) {
if (c == '%') {
/* print common characters if any processed */
if (i > j) {
if ((retval = printf_putnchars(&fmt[j],
536,7 → 534,7
} else if (fmt[i] == '*') {
/* get width value from argument list */
i++;
width = (int)va_arg(ap, int);
width = (int) va_arg(ap, int);
if (width < 0) {
/* negative width sets '-' flag */
width *= -1;
559,7 → 557,7
* list.
*/
i++;
precision = (int)va_arg(ap, int);
precision = (int) va_arg(ap, int);
if (precision < 0) {
/* ignore negative precision */
precision = 0;
585,9 → 583,6
qualifier = PrintfQualifierLongLong;
}
break;
case 'z': /* unative_t */
qualifier = PrintfQualifierNative;
break;
default:
/* default type */
qualifier = PrintfQualifierInt;
627,7 → 622,7
* Integer values
*/
case 'P': /* pointer */
flags |= __PRINTF_FLAG_BIGCHARS;
flags |= __PRINTF_FLAG_BIGCHARS;
case 'p':
flags |= __PRINTF_FLAG_PREFIX;
base = 16;
670,34 → 665,28
switch (qualifier) {
case PrintfQualifierByte:
size = sizeof(unsigned char);
number = (uint64_t)va_arg(ap, unsigned int);
number = (uint64_t) va_arg(ap, unsigned int);
break;
case PrintfQualifierShort:
size = sizeof(unsigned short);
number = (uint64_t)va_arg(ap, unsigned int);
number = (uint64_t) va_arg(ap, unsigned int);
break;
case PrintfQualifierInt:
size = sizeof(unsigned int);
number = (uint64_t)va_arg(ap, unsigned int);
number = (uint64_t) va_arg(ap, unsigned int);
break;
case PrintfQualifierLong:
size = sizeof(unsigned long);
number = (uint64_t)va_arg(ap, unsigned long);
number = (uint64_t) va_arg(ap, unsigned long);
break;
case PrintfQualifierLongLong:
size = sizeof(unsigned long long);
number = (uint64_t)va_arg(ap,
unsigned long long);
number = (uint64_t) va_arg(ap, unsigned long long);
break;
case PrintfQualifierPointer:
size = sizeof(void *);
number = (uint64_t)(unsigned long)va_arg(ap,
void *);
number = (uint64_t) (unsigned long) va_arg(ap, void *);
break;
case PrintfQualifierNative:
size = sizeof(unative_t);
number = (uint64_t)va_arg(ap, unative_t);
break;
default: /* Unknown qualifier */
counter = -counter;
goto out;
708,7 → 697,7
flags |= __PRINTF_FLAG_NEGATIVE;
if (size == sizeof(uint64_t)) {
number = -((int64_t)number);
number = -((int64_t) number);
} else {
number = ~number;
number &=
734,7 → 723,7
}
if (i > j) {
if ((retval = printf_putnchars(&fmt[j], (unative_t)(i - j),
if ((retval = printf_putnchars(&fmt[j], (unative_t) (i - j),
ps)) < 0) { /* error */
counter = -counter;
goto out;
744,7 → 733,6
}
 
out:
return counter;
}
 
/branches/tracing/kernel/generic/src/console/klog.c
File deleted
/branches/tracing/kernel/generic/src/console/console.c
35,30 → 35,56
 
#include <console/console.h>
#include <console/chardev.h>
#include <sysinfo/sysinfo.h>
#include <synch/waitq.h>
#include <synch/spinlock.h>
#include <arch/types.h>
#include <ddi/device.h>
#include <ddi/irq.h>
#include <ddi/ddi.h>
#include <ipc/irq.h>
#include <arch.h>
#include <func.h>
#include <print.h>
#include <atomic.h>
 
#define BUFLEN 2048
static char debug_buffer[BUFLEN];
static size_t offset = 0;
/** Initialize stdout to something that does not print, but does not fail
*
* Save data in some buffer so that it could be retrieved in the debugger
#define KLOG_SIZE PAGE_SIZE
#define KLOG_LATENCY 8
 
/**< Kernel log cyclic buffer */
static char klog[KLOG_SIZE] __attribute__ ((aligned (PAGE_SIZE)));
 
/**< Kernel log initialized */
static bool klog_inited = false;
/**< Index of the first kernel log character */
static index_t klog_start = 0;
/**< Number of valid kernel log characters */
static size_t klog_len = 0;
/**< Number of stored (not printed) kernel log characters */
static size_t klog_stored = 0;
/**< Number of stored kernel log characters for uspace */
static size_t klog_uspace = 0;
 
/**< Kernel log spinlock */
SPINLOCK_INITIALIZE(klog_lock);
 
/** Physical memory area used for klog buffer */
static parea_t klog_parea;
/*
* For now, we use 0 as INR.
* However, it is desirable to have an architecture-specific
* definition of KLOG_VIRT_INR in the future.
*/
static void null_putchar(chardev_t *d, const char ch)
{
if (offset >= BUFLEN)
offset = 0;
debug_buffer[offset++] = ch;
}
#define KLOG_VIRT_INR 0
 
static irq_t klog_irq;
 
static chardev_operations_t null_stdout_ops = {
.write = null_putchar
.suspend = NULL,
.resume = NULL,
.write = NULL,
.read = NULL
};
 
chardev_t null_stdout = {
66,10 → 92,58
.op = &null_stdout_ops
};
 
/** Standard input character device. */
/** Always refuse IRQ ownership.
*
* This is not a real IRQ, so we always decline.
*
* @return Always returns IRQ_DECLINE.
*/
static irq_ownership_t klog_claim(void)
{
return IRQ_DECLINE;
}
 
/** Standard input character device */
chardev_t *stdin = NULL;
chardev_t *stdout = &null_stdout;
 
/** Initialize kernel logging facility
*
* The shared area contains the kernel's cyclic buffer. A userspace
* application may be notified of new data, with an indication of the
* position and size of the data within the circular buffer.
*/
void klog_init(void)
{
void *faddr = (void *) KA2PA(klog);
ASSERT((uintptr_t) faddr % FRAME_SIZE == 0);
ASSERT(KLOG_SIZE % FRAME_SIZE == 0);
 
devno_t devno = device_assign_devno();
klog_parea.pbase = (uintptr_t) faddr;
klog_parea.vbase = (uintptr_t) klog;
klog_parea.frames = SIZE2FRAMES(KLOG_SIZE);
klog_parea.cacheable = true;
ddi_parea_register(&klog_parea);
 
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
sysinfo_set_item_val("klog.pages", NULL, SIZE2FRAMES(KLOG_SIZE));
sysinfo_set_item_val("klog.devno", NULL, devno);
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR);
 
irq_initialize(&klog_irq);
klog_irq.devno = devno;
klog_irq.inr = KLOG_VIRT_INR;
klog_irq.claim = klog_claim;
irq_register(&klog_irq);
spinlock_lock(&klog_lock);
klog_inited = true;
spinlock_unlock(&klog_lock);
}
 
/** Get character from character device. Do not echo character.
*
* @param chardev Character device.
90,7 → 164,7
return chardev->op->read(chardev);
/* no other way of interacting with user, halt */
if (CPU)
printf("cpu%d: ", CPU->id);
printf("cpu%u: ", CPU->id);
else
printf("cpu: ");
printf("halted - no kconsole\n");
159,10 → 233,60
return ch;
}
 
void klog_update(void)
{
spinlock_lock(&klog_lock);
if ((klog_inited) && (klog_irq.notif_cfg.notify) && (klog_uspace > 0)) {
ipc_irq_send_msg_3(&klog_irq, klog_start, klog_len, klog_uspace);
klog_uspace = 0;
}
spinlock_unlock(&klog_lock);
}
 
void putchar(char c)
{
spinlock_lock(&klog_lock);
if ((klog_stored > 0) && (stdout->op->write)) {
/* Print characters stored in the kernel log */
index_t i;
for (i = klog_len - klog_stored; i < klog_len; i++)
stdout->op->write(stdout, klog[(klog_start + i) % KLOG_SIZE]);
klog_stored = 0;
}
/* Store character in the cyclic kernel log */
klog[(klog_start + klog_len) % KLOG_SIZE] = c;
if (klog_len < KLOG_SIZE)
klog_len++;
else
klog_start = (klog_start + 1) % KLOG_SIZE;
if (stdout->op->write)
stdout->op->write(stdout, c);
else {
/* The character is just in the kernel log */
if (klog_stored < klog_len)
klog_stored++;
}
/* The character is stored for uspace */
if (klog_uspace < klog_len)
klog_uspace++;
/* Check whether uspace should be notified to update */
bool update;
if ((klog_uspace > KLOG_LATENCY) || (c == '\n'))
update = true;
else
update = false;
spinlock_unlock(&klog_lock);
if (update)
klog_update();
}
 
/** @}
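The deleted klog.c is replaced by a cyclic buffer built directly into console.c: putchar() stores each character at (klog_start + klog_len) % KLOG_SIZE and, once the buffer is full, advances klog_start to overwrite the oldest data. A standalone sketch of just this index arithmetic; locking, the virtual IRQ notification and the klog_stored/klog_uspace bookkeeping are omitted, and KLOG_SIZE is shrunk so the wrap-around is visible:

#include <stdio.h>
#include <stddef.h>

#define KLOG_SIZE 16		/* the kernel uses PAGE_SIZE */

static char klog[KLOG_SIZE];
static size_t klog_start = 0;	/* index of the oldest character */
static size_t klog_len = 0;	/* number of valid characters */

static void klog_put(char c)
{
	klog[(klog_start + klog_len) % KLOG_SIZE] = c;
	if (klog_len < KLOG_SIZE)
		klog_len++;					/* still filling up */
	else
		klog_start = (klog_start + 1) % KLOG_SIZE;	/* overwrite the oldest */
}

int main(void)
{
	const char *msg = "kernel log ring buffer demo";
	for (const char *p = msg; *p; p++)
		klog_put(*p);

	/* Replay the buffer in order, oldest character first. */
	for (size_t i = 0; i < klog_len; i++)
		putchar(klog[(klog_start + i) % KLOG_SIZE]);
	putchar('\n');
	return 0;
}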
/branches/tracing/kernel/generic/src/console/cmd.c
563,7 → 563,7
/* This doesn't have to be very accurate */
unative_t sec = uptime->seconds1;
printf("Up %u days, %u hours, %u minutes, %u seconds\n",
printf("Up %" PRIun " days, %" PRIun " hours, %" PRIun " minutes, %" PRIun " seconds\n",
sec / 86400, (sec % 86400) / 3600, (sec % 3600) / 60, sec % 60);
return 1;
632,7 → 632,7
printf("Duplicate symbol, be more specific.\n");
} else {
symbol = get_symtab_entry(symaddr);
printf("Calling %s() (%.*p)\n", symbol, sizeof(uintptr_t) * 2, symaddr);
printf("Calling %s() (%p)\n", symbol, symaddr);
#ifdef ia64
fptr.f = symaddr;
fptr.gp = ((unative_t *)cmd_call2)[1];
640,7 → 640,7
#else
f = (unative_t (*)(void)) symaddr;
#endif
printf("Result: %#zx\n", f());
printf("Result: %#" PRIxn "\n", f());
}
return 1;
686,7 → 686,7
struct {
unative_t f;
unative_t gp;
}fptr;
} fptr;
#endif
 
symaddr = get_symbol_addr((char *) argv->buffer);
698,7 → 698,7
} else {
symbol = get_symtab_entry(symaddr);
 
printf("Calling f(%#zx): %.*p: %s\n", arg1, sizeof(uintptr_t) * 2, symaddr, symbol);
printf("Calling f(%#" PRIxn "): %p: %s\n", arg1, symaddr, symbol);
#ifdef ia64
fptr.f = symaddr;
fptr.gp = ((unative_t *)cmd_call2)[1];
706,7 → 706,7
#else
f = (unative_t (*)(unative_t,...)) symaddr;
#endif
printf("Result: %#zx\n", f(arg1));
printf("Result: %#" PRIxn "\n", f(arg1));
}
return 1;
735,8 → 735,8
printf("Duplicate symbol, be more specific.\n");
} else {
symbol = get_symtab_entry(symaddr);
printf("Calling f(0x%zx,0x%zx): %.*p: %s\n",
arg1, arg2, sizeof(uintptr_t) * 2, symaddr, symbol);
printf("Calling f(%#" PRIxn ", %#" PRIxn "): %p: %s\n",
arg1, arg2, symaddr, symbol);
#ifdef ia64
fptr.f = symaddr;
fptr.gp = ((unative_t *)cmd_call2)[1];
744,7 → 744,7
#else
f = (unative_t (*)(unative_t,unative_t,...)) symaddr;
#endif
printf("Result: %#zx\n", f(arg1, arg2));
printf("Result: %#" PRIxn "\n", f(arg1, arg2));
}
return 1;
774,8 → 774,8
printf("Duplicate symbol, be more specific.\n");
} else {
symbol = get_symtab_entry(symaddr);
printf("Calling f(0x%zx,0x%zx, 0x%zx): %.*p: %s\n",
arg1, arg2, arg3, sizeof(uintptr_t) * 2, symaddr, symbol);
printf("Calling f(%#" PRIxn ",%#" PRIxn ", %#" PRIxn "): %p: %s\n",
arg1, arg2, arg3, symaddr, symbol);
#ifdef ia64
fptr.f = symaddr;
fptr.gp = ((unative_t *)cmd_call2)[1];
783,7 → 783,7
#else
f = (unative_t (*)(unative_t,unative_t,unative_t,...)) symaddr;
#endif
printf("Result: %#zx\n", f(arg1, arg2, arg3));
printf("Result: %#" PRIxn "\n", f(arg1, arg2, arg3));
}
return 1;
856,7 → 856,7
} else {
if (pointer)
addr = (uint32_t *)(*(unative_t *)addr);
printf("Writing 0x%x -> %.*p\n", arg1, sizeof(uintptr_t) * 2, addr);
printf("Writing %#" PRIx64 " -> %p\n", arg1, addr);
*addr = arg1;
}
1025,7 → 1025,7
char suffix;
order(dt, &cycles, &suffix);
printf("Time: %llu%c cycles\n", cycles, suffix);
printf("Time: %" PRIu64 "%c cycles\n", cycles, suffix);
if (ret == NULL) {
printf("Test passed\n");
1053,7 → 1053,7
}
for (i = 0; i < cnt; i++) {
printf("%s (%d/%d) ... ", test->name, i + 1, cnt);
printf("%s (%u/%u) ... ", test->name, i + 1, cnt);
/* Update and read thread accounting
for benchmarking */
1081,7 → 1081,7
data[i] = dt;
order(dt, &cycles, &suffix);
printf("OK (%llu%c cycles)\n", cycles, suffix);
printf("OK (%" PRIu64 "%c cycles)\n", cycles, suffix);
}
if (ret) {
1094,7 → 1094,7
}
order(sum / (uint64_t) cnt, &cycles, &suffix);
printf("Average\t\t%llu%c\n", cycles, suffix);
printf("Average\t\t%" PRIu64 "%c\n", cycles, suffix);
}
free(data);
/branches/tracing/kernel/generic/src/console/chardev.c
42,7 → 42,7
* @param chardev Character device.
* @param op Implementation of character device operations.
*/
void chardev_initialize(char *name,chardev_t *chardev,
void chardev_initialize(char *name, chardev_t *chardev,
chardev_operations_t *op)
{
chardev->name = name;
/branches/tracing/kernel/generic/src/proc/scheduler.c
451,7 → 451,7
/*
* Entering state is unexpected.
*/
panic("tid%llu: unexpected state %s\n", THREAD->tid,
panic("tid%" PRIu64 ": unexpected state %s\n", THREAD->tid,
thread_states[THREAD->state]);
break;
}
504,7 → 504,7
THREAD->state = Running;
 
#ifdef SCHEDULER_VERBOSE
printf("cpu%d: tid %llu (priority=%d, ticks=%llu, nrdy=%ld)\n",
printf("cpu%u: tid %" PRIu64 " (priority=%d, ticks=%" PRIu64 ", nrdy=%ld)\n",
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
atomic_get(&CPU->nrdy));
#endif
640,8 → 640,8
*/
spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
printf("kcpulb%d: TID %llu -> cpu%d, nrdy=%ld, "
"avg=%nd\n", CPU->id, t->tid, CPU->id,
printf("kcpulb%u: TID %" PRIu64 " -> cpu%u, nrdy=%ld, "
"avg=%ld\n", CPU->id, t->tid, CPU->id,
atomic_get(&CPU->nrdy),
atomic_get(&nrdy) / config.cpu_active);
#endif
708,7 → 708,7
continue;
 
spinlock_lock(&cpus[cpu].lock);
printf("cpu%d: address=%p, nrdy=%ld, needs_relink=%ld\n",
printf("cpu%u: address=%p, nrdy=%ld, needs_relink=%" PRIc "\n",
cpus[cpu].id, &cpus[cpu], atomic_get(&cpus[cpu].nrdy),
cpus[cpu].needs_relink);
719,11 → 719,11
spinlock_unlock(&r->lock);
continue;
}
printf("\trq[%d]: ", i);
printf("\trq[%u]: ", i);
for (cur = r->rq_head.next; cur != &r->rq_head;
cur = cur->next) {
t = list_get_instance(cur, thread_t, rq_link);
printf("%llu(%s) ", t->tid,
printf("%" PRIu64 "(%s) ", t->tid,
thread_states[t->state]);
}
printf("\n");
/branches/tracing/kernel/generic/src/proc/task.c
45,6 → 45,7
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
#include <arch/barrier.h>
#include <panic.h>
#include <adt/avl.h>
#include <adt/btree.h>
128,7 → 129,7
interrupts_restore(ipl);
#ifdef CONFIG_DEBUG
printf("Killing task %llu\n", id);
printf("Killing task %" PRIu64 "\n", id);
#endif
task_kill(id);
thread_usleep(10000);
242,78 → 243,106
TASK = NULL;
}
 
/** Create new task with 1 thread and run it
/** Syscall for reading task ID from userspace.
*
* @param program_addr Address of program executable image.
* @param name Program name.
* @param uspace_task_id Userspace address of 8-byte buffer where to store
* current task ID.
*
* @return Task of the running program or NULL on error.
* @return 0 on success or an error code from @ref errno.h.
*/
task_t *task_run_program(void *program_addr, char *name)
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
as_t *as;
as_area_t *a;
unsigned int rc;
thread_t *t;
task_t *task;
uspace_arg_t *kernel_uarg;
/*
* No need to acquire lock on TASK because taskid
* remains constant for the lifespan of the task.
*/
return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
sizeof(TASK->taskid));
}
 
as = as_create(0);
ASSERT(as);
unative_t sys_task_spawn(void *image, size_t size)
{
void *kimage = malloc(size, 0);
if (kimage == NULL)
return ENOMEM;
int rc = copy_from_uspace(kimage, image, size);
if (rc != EOK)
return rc;
 
rc = elf_load((elf_header_t *) program_addr, as);
if (rc != EE_OK) {
as_destroy(as);
return NULL;
}
/*
* Not very efficient and it would be better to call it on code only,
* but this whole function is a temporary hack anyway and one day it
	 * will go away in favor of the userspace dynamic loader.
*/
smc_coherence_block(kimage, size);
uspace_arg_t *kernel_uarg;
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
if (kernel_uarg == NULL) {
free(kimage);
return ENOMEM;
}
kernel_uarg->uspace_entry =
(void *) ((elf_header_t *) program_addr)->e_entry;
(void *) ((elf_header_t *) kimage)->e_entry;
kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
kernel_uarg->uspace_thread_function = NULL;
kernel_uarg->uspace_thread_arg = NULL;
kernel_uarg->uspace_uarg = NULL;
task = task_create(as, name);
ASSERT(task);
 
/*
* Create the data as_area.
*/
a = as_area_create(as, AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
as_t *as = as_create(0);
if (as == NULL) {
free(kernel_uarg);
free(kimage);
return ENOMEM;
}
unsigned int erc = elf_load((elf_header_t *) kimage, as);
if (erc != EE_OK) {
as_destroy(as);
free(kernel_uarg);
free(kimage);
return ENOENT;
}
as_area_t *area = as_area_create(as,
AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
AS_AREA_ATTR_NONE, &anon_backend, NULL);
 
/*
* Create the main thread.
*/
t = thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
"uinit", false);
ASSERT(t);
if (area == NULL) {
as_destroy(as);
free(kernel_uarg);
free(kimage);
return ENOMEM;
}
thread_ready(t);
 
return task;
task_t *task = task_create(as, "app");
if (task == NULL) {
as_destroy(as);
free(kernel_uarg);
free(kimage);
return ENOENT;
}
// FIXME: control the capabilities
cap_set(task, cap_get(TASK));
thread_t *thread = thread_create(uinit, kernel_uarg, task,
THREAD_FLAG_USPACE, "user", false);
if (thread == NULL) {
task_destroy(task);
as_destroy(as);
free(kernel_uarg);
free(kimage);
return ENOENT;
}
thread_ready(thread);
return EOK;
}
 
/** Syscall for reading task ID from userspace.
*
* @param uspace_task_id Userspace address of 8-byte buffer where to store
* current task ID.
*
* @return 0 on success or an error code from @ref errno.h.
*/
unative_t sys_task_get_id(task_id_t *uspace_task_id)
{
/*
* No need to acquire lock on TASK because taskid
* remains constant for the lifespan of the task.
*/
return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,
sizeof(TASK->taskid));
}
 
/** Find task structure corresponding to task ID.
*
* The tasks_lock must be already held by the caller of this function
429,18 → 458,22
uint64_t cycles;
char suffix;
order(task_get_accounting(t), &cycles, &suffix);
if (sizeof(void *) == 4)
printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd %6zd",
t->taskid, t->name, t->context, t, t->as, cycles, suffix,
t->refcount, atomic_get(&t->active_calls));
else
printf("%-6llu %-10s %-3ld %#18zx %#18zx %9llu%c %7zd %6zd",
t->taskid, t->name, t->context, t, t->as, cycles, suffix,
t->refcount, atomic_get(&t->active_calls));
 
#ifdef __32_BITS__
printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %10p %10p %9" PRIu64
"%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif
 
#ifdef __64_BITS__
printf("%-6" PRIu64 " %-10s %-3" PRIu32 " %18p %18p %9" PRIu64
"%c %7ld %6ld", t->taskid, t->name, t->context, t, t->as, cycles,
suffix, atomic_get(&t->refcount), atomic_get(&t->active_calls));
#endif
 
for (j = 0; j < IPC_MAX_PHONES; j++) {
if (t->phones[j].callee)
printf(" %zd:%#zx", j, t->phones[j].callee);
printf(" %d:%p", j, t->phones[j].callee);
}
printf("\n");
456,19 → 489,21
/* Messing with task structures, avoid deadlock */
ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
if (sizeof(void *) == 4) {
printf("taskid name ctx address as "
"cycles threads calls callee\n");
printf("------ ---------- --- ---------- ---------- "
"---------- ------- ------ ------>\n");
} else {
printf("taskid name ctx address as "
"cycles threads calls callee\n");
printf("------ ---------- --- ------------------ ------------------ "
"---------- ------- ------ ------>\n");
}
 
#ifdef __32_BITS__
printf("taskid name ctx address as "
"cycles threads calls callee\n");
printf("------ ---------- --- ---------- ---------- "
"---------- ------- ------ ------>\n");
#endif
 
#ifdef __64_BITS__
printf("taskid name ctx address as "
"cycles threads calls callee\n");
printf("------ ---------- --- ------------------ ------------------ "
"---------- ------- ------ ------>\n");
#endif
 
avltree_walk(&tasks_tree, task_print_walker, NULL);
 
spinlock_unlock(&tasks_lock);
/branches/tracing/kernel/generic/src/proc/thread.c
67,9 → 67,13
#include <main/uinit.h>
#include <syscall/copy.h>
#include <errno.h>
#include <console/klog.h>
 
 
#ifndef LOADED_PROG_STACK_PAGES_NO
#define LOADED_PROG_STACK_PAGES_NO 1
#endif
 
 
/** Thread states */
char *thread_states[] = {
"Invalid",
291,8 → 295,7
return NULL;
/* Not needed, but good for debugging */
memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES,
0);
memsetb(t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
ipl = interrupts_disable();
spinlock_lock(&tidlock);
454,9 → 457,8
* We are safe to perform cleanup.
*/
ipc_cleanup();
futex_cleanup();
klog_printf("Cleanup of task %llu completed.",
TASK->taskid);
futex_cleanup();
LOG("Cleanup of task %" PRIu64" completed.", TASK->taskid);
}
}
 
592,33 → 594,37
 
static bool thread_walker(avltree_node_t *node, void *arg)
{
thread_t *t;
t = avltree_get_instance(node, thread_t, threads_tree_node);
 
thread_t *t = avltree_get_instance(node, thread_t, threads_tree_node);
uint64_t cycles;
char suffix;
order(t->cycles, &cycles, &suffix);
if (sizeof(void *) == 4)
printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ",
t->tid, t->name, t, thread_states[t->state], t->task,
t->task->context, t->thread_code, t->kstack, cycles, suffix);
else
printf("%-6llu %-10s %#18zx %-8s %#18zx %-3ld %#18zx %#18zx %9llu%c ",
t->tid, t->name, t, thread_states[t->state], t->task,
t->task->context, t->thread_code, t->kstack, cycles, suffix);
 
#ifdef __32_BITS__
printf("%-6" PRIu64" %-10s %10p %-8s %10p %-3" PRIu32 " %10p %10p %9" PRIu64 "%c ",
t->tid, t->name, t, thread_states[t->state], t->task,
t->task->context, t->thread_code, t->kstack, cycles, suffix);
#endif
 
#ifdef __64_BITS__
printf("%-6" PRIu64" %-10s %18p %-8s %18p %-3" PRIu32 " %18p %18p %9" PRIu64 "%c ",
t->tid, t->name, t, thread_states[t->state], t->task,
t->task->context, t->thread_code, t->kstack, cycles, suffix);
#endif
if (t->cpu)
printf("%-4zd", t->cpu->id);
printf("%-4u", t->cpu->id);
else
printf("none");
if (t->state == Sleeping) {
if (sizeof(uintptr_t) == 4)
printf(" %#10zx", t->sleep_queue);
else
printf(" %#18zx", t->sleep_queue);
#ifdef __32_BITS__
printf(" %10p", t->sleep_queue);
#endif
 
#ifdef __64_BITS__
printf(" %18p", t->sleep_queue);
#endif
}
printf("\n");
634,23 → 640,25
/* Messing with thread structures, avoid deadlock */
ipl = interrupts_disable();
spinlock_lock(&threads_lock);
if (sizeof(uintptr_t) == 4) {
printf("tid name address state task "
"ctx code stack cycles cpu "
"waitqueue\n");
printf("------ ---------- ---------- -------- ---------- "
"--- ---------- ---------- ---------- ---- "
"----------\n");
} else {
printf("tid name address state task "
"ctx code stack cycles cpu "
"waitqueue\n");
printf("------ ---------- ------------------ -------- ------------------ "
"--- ------------------ ------------------ ---------- ---- "
"------------------\n");
}
 
#ifdef __32_BITS__
printf("tid name address state task "
"ctx code stack cycles cpu "
"waitqueue\n");
printf("------ ---------- ---------- -------- ---------- "
"--- ---------- ---------- ---------- ---- "
"----------\n");
#endif
 
#ifdef __64_BITS__
printf("tid name address state task "
"ctx code stack cycles cpu "
"waitqueue\n");
printf("------ ---------- ------------------ -------- ------------------ "
"--- ------------------ ------------------ ---------- ---- "
"------------------\n");
#endif
 
avltree_walk(&threads_tree, thread_walker, NULL);
 
spinlock_unlock(&threads_lock);
676,6 → 684,73
}
 
 
/** Create new user task with 1 thread from image
*
* @param program_addr Address of program executable image.
* @param name Program name.
*
* @return Initialized main thread of the task or NULL on error.
*/
thread_t *thread_create_program(void *program_addr, char *name)
{
as_t *as;
as_area_t *area;
unsigned int rc;
task_t *task;
uspace_arg_t *kernel_uarg;
kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
if (kernel_uarg == NULL)
return NULL;
kernel_uarg->uspace_entry =
(void *) ((elf_header_t *) program_addr)->e_entry;
kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;
kernel_uarg->uspace_thread_function = NULL;
kernel_uarg->uspace_thread_arg = NULL;
kernel_uarg->uspace_uarg = NULL;
 
as = as_create(0);
if (as == NULL) {
free(kernel_uarg);
return NULL;
}
 
rc = elf_load((elf_header_t *) program_addr, as);
if (rc != EE_OK) {
free(kernel_uarg);
as_destroy(as);
return NULL;
}
/*
* Create the data as_area.
*/
area = as_area_create(as,
AS_AREA_READ | AS_AREA_WRITE | AS_AREA_CACHEABLE,
LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,
AS_AREA_ATTR_NONE, &anon_backend, NULL);
if (area == NULL) {
free(kernel_uarg);
as_destroy(as);
return NULL;
}
task = task_create(as, name);
if (task == NULL) {
free(kernel_uarg);
as_destroy(as);
return NULL;
}
/*
* Create the main thread.
*/
return thread_create(uinit, kernel_uarg, task, THREAD_FLAG_USPACE,
"uinit", false);
}
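A minimal usage sketch, assuming a hypothetical image_addr that points at a loaded ELF image: the caller makes the returned main thread ready, mirroring what the former task_run_program() did.

	thread_t *t = thread_create_program(image_addr, "init");	/* image_addr is an assumption */
	if (t != NULL)
		thread_ready(t);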
 
 
/** Update accounting of current thread.
*
* Note that thread_lock on THREAD must be already held and
/branches/tracing/kernel/generic/src/lib/memstr.c
67,10 → 67,10
((uint8_t *) dst)[i] = ((uint8_t *) src)[i];
} else {
for (i = 0; i < cnt/sizeof(unative_t); i++)
for (i = 0; i < cnt / sizeof(unative_t); i++)
((unative_t *) dst)[i] = ((unative_t *) src)[i];
for (j = 0; j < cnt%sizeof(unative_t); j++)
for (j = 0; j < cnt % sizeof(unative_t); j++)
((uint8_t *)(((unative_t *) dst) + i))[j] = ((uint8_t *)(((unative_t *) src) + i))[j];
}
87,7 → 87,7
* @param x Value to fill.
*
*/
void _memsetb(uintptr_t dst, size_t cnt, uint8_t x)
void _memsetb(void *dst, size_t cnt, uint8_t x)
{
unsigned int i;
uint8_t *p = (uint8_t *) dst;
106,7 → 106,7
* @param x Value to fill.
*
*/
void _memsetw(uintptr_t dst, size_t cnt, uint16_t x)
void _memsetw(void *dst, size_t cnt, uint16_t x)
{
unsigned int i;
uint16_t *p = (uint16_t *) dst;
/branches/tracing/kernel/generic/src/lib/func.c
73,7 → 73,7
}
#endif
if (CPU)
printf("cpu%d: halted\n", CPU->id);
printf("cpu%u: halted\n", CPU->id);
else
printf("cpu: halted\n");
cpu_halt();
/branches/tracing/kernel/generic/src/lib/objc_ext.c
101,7 → 101,7
 
void __assert_fail(const char *assertion, const char *file, unsigned int line, const char *function)
{
panic("Run-time assertion (%s:%d:%s) failed (%s)", file, line, function ? function : "", assertion);
panic("Run-time assertion (%s:%u:%s) failed (%s)", file, line, function ? function : "", assertion);
}
 
void abort(void)
161,7 → 161,7
 
void *memset(void *s, int c, size_t n)
{
memsetb((uintptr_t) s, n, c);
memsetb(s, n, c);
return s;
}
 
/branches/tracing/kernel/generic/src/adt/btree.c
124,7 → 124,7
lnode = leaf_node;
if (!lnode) {
if (btree_search(t, key, &lnode)) {
panic("B-tree %p already contains key %d\n", t, key);
panic("B-tree %p already contains key %" PRIu64 "\n", t, key);
}
}
224,7 → 224,7
lnode = leaf_node;
if (!lnode) {
if (!btree_search(t, key, &lnode)) {
panic("B-tree %p does not contain key %d\n", t, key);
panic("B-tree %p does not contain key %" PRIu64 "\n", t, key);
}
}
524,7 → 524,7
return;
}
}
panic("node %p does not contain key %d\n", node, key);
panic("node %p does not contain key %" PRIu64 "\n", node, key);
}
 
/** Remove key and its right subtree pointer from B-tree node.
551,7 → 551,7
return;
}
}
panic("node %p does not contain key %d\n", node, key);
panic("node %p does not contain key %" PRIu64 "\n", node, key);
}
 
/** Split full B-tree node and insert new key-value-right-subtree triplet.
970,7 → 970,7
 
printf("(");
for (i = 0; i < node->keys; i++) {
printf("%llu%s", node->key[i], i < node->keys - 1 ? "," : "");
printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? "," : "");
if (node->depth && node->subtree[i]) {
list_append(&node->subtree[i]->bfs_link, &head);
}
992,7 → 992,7
 
printf("(");
for (i = 0; i < node->keys; i++)
printf("%llu%s", node->key[i], i < node->keys - 1 ? "," : "");
printf("%" PRIu64 "%s", node->key[i], i < node->keys - 1 ? "," : "");
printf(")");
}
printf("\n");
/branches/tracing/kernel/generic/src/adt/hash_table.c
63,7 → 63,7
if (!h->entry) {
panic("cannot allocate memory for hash table\n");
}
memsetb((uintptr_t) h->entry, m * sizeof(link_t), 0);
memsetb(h->entry, m * sizeof(link_t), 0);
for (i = 0; i < m; i++)
list_initialize(&h->entry[i]);
/branches/tracing/kernel/generic/src/mm/slab.c
167,7 → 167,7
* Allocate frames for slab space and initialize
*
*/
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
{
void *data;
slab_t *slab;
179,7 → 179,7
if (!data) {
return NULL;
}
if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
slab = slab_alloc(slab_extern_cache, flags);
if (!slab) {
frame_free(KA2PA(data));
200,7 → 200,7
slab->cache = cache;
 
for (i = 0; i < cache->objects; i++)
*((int *) (slab->start + i*cache->size)) = i+1;
*((int *) (slab->start + i*cache->size)) = i + 1;
 
atomic_inc(&cache->allocated_slabs);
return slab;
239,8 → 239,7
*
* @return Number of freed pages
*/
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
slab_t *slab)
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
{
int freed = 0;
 
256,7 → 255,7
ASSERT(slab->available < cache->objects);
 
*((int *)obj) = slab->nextavail;
slab->nextavail = (obj - slab->start)/cache->size;
slab->nextavail = (obj - slab->start) / cache->size;
slab->available++;
 
/* Move it to correct list */
281,7 → 280,7
*
* @return Object address or null
*/
static void * slab_obj_create(slab_cache_t *cache, int flags)
static void *slab_obj_create(slab_cache_t *cache, int flags)
{
slab_t *slab;
void *obj;
301,7 → 300,8
return NULL;
spinlock_lock(&cache->slablock);
} else {
slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
slab = list_get_instance(cache->partial_slabs.next, slab_t,
link);
list_remove(&slab->link);
}
obj = slab->start + slab->nextavail * cache->size;
332,8 → 332,7
*
* @param first If true, return first, else last mag
*/
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
int first)
static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
{
slab_magazine_t *mag = NULL;
link_t *cur;
368,8 → 367,7
*
* @return Number of freed pages
*/
static count_t magazine_destroy(slab_cache_t *cache,
slab_magazine_t *mag)
static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
{
unsigned int i;
count_t frames = 0;
389,7 → 387,7
*
* Assume cpu_magazine lock is held
*/
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
{
slab_magazine_t *cmag, *lastmag, *newmag;
 
423,7 → 421,7
*
* @return Pointer to object or NULL if not available
*/
static void * magazine_obj_get(slab_cache_t *cache)
static void *magazine_obj_get(slab_cache_t *cache)
{
slab_magazine_t *mag;
void *obj;
458,7 → 456,7
* allocate new, exchange last & current
*
*/
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
{
slab_magazine_t *cmag,*lastmag,*newmag;
 
530,7 → 528,8
static unsigned int comp_objects(slab_cache_t *cache)
{
if (cache->flags & SLAB_CACHE_SLINSIDE)
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
cache->size;
else
return (PAGE_SIZE << cache->order) / cache->size;
}
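For illustration only (the concrete numbers are assumptions, not taken from the source): with a 4 KiB page, order 0, 128-byte objects and, say, a 64-byte slab_t header, SLAB_CACHE_SLINSIDE yields (4096 - 64) / 128 = 31 objects per slab, whereas keeping slab_t outside the slab yields 4096 / 128 = 32.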
557,28 → 556,25
ASSERT(_slab_initialized >= 2);
 
cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
0);
for (i = 0; i < config.cpu_count; i++) {
memsetb((uintptr_t)&cache->mag_cache[i],
sizeof(cache->mag_cache[i]), 0);
spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
spinlock_initialize(&cache->mag_cache[i].lock,
"slab_maglock_cpu");
}
}
 
/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
char *name,
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
int (*destructor)(void *obj),
int flags)
_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
int flags)
{
int pages;
ipl_t ipl;
 
memsetb((uintptr_t)cache, sizeof(*cache), 0);
memsetb(cache, sizeof(*cache), 0);
cache->name = name;
 
if (align < sizeof(unative_t))
596,7 → 592,7
list_initialize(&cache->magazines);
spinlock_initialize(&cache->slablock, "slab_lock");
spinlock_initialize(&cache->maglock, "slab_maglock");
if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
make_magcache(cache);
 
/* Compute slab sizes, object counts in slabs etc. */
609,7 → 605,7
if (pages == 1)
cache->order = 0;
else
cache->order = fnzb(pages-1)+1;
cache->order = fnzb(pages - 1) + 1;
 
while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
cache->order += 1;
630,18 → 626,16
}
 
/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
int (*destructor)(void *obj),
int flags)
slab_cache_t *
slab_cache_create(char *name, size_t size, size_t align,
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
int flags)
{
slab_cache_t *cache;
 
cache = slab_alloc(&slab_cache_cache, 0);
_slab_cache_create(cache, name, size, align, constructor, destructor,
flags);
flags);
return cache;
}
 
665,7 → 659,7
* endless loop
*/
magcount = atomic_get(&cache->magazine_counter);
while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
frames += magazine_destroy(cache,mag);
if (!(flags & SLAB_RECLAIM_ALL) && frames)
break;
718,8 → 712,8
_slab_reclaim(cache, SLAB_RECLAIM_ALL);
 
/* All slabs must be empty */
if (!list_empty(&cache->full_slabs) \
|| !list_empty(&cache->partial_slabs))
if (!list_empty(&cache->full_slabs) ||
!list_empty(&cache->partial_slabs))
panic("Destroying cache that is not empty.");
 
if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
727,9 → 721,8
slab_free(&slab_cache_cache, cache);
}
 
/** Allocate new object from cache - if no flags given, always returns
memory */
void * slab_alloc(slab_cache_t *cache, int flags)
/** Allocate new object from cache - if no flags given, always returns memory */
void *slab_alloc(slab_cache_t *cache, int flags)
{
ipl_t ipl;
void *result = NULL;
758,9 → 751,8
 
ipl = interrupts_disable();
 
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
|| magazine_obj_put(cache, obj)) {
 
if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
magazine_obj_put(cache, obj)) {
slab_obj_destroy(cache, obj, slab);
 
}
787,7 → 779,8
* memory allocation from interrupts can deadlock.
*/
 
for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
for (cur = slab_cache_list.next; cur != &slab_cache_list;
cur = cur->next) {
cache = list_get_instance(cur, slab_cache_t, link);
frames += _slab_reclaim(cache, flags);
}
807,13 → 800,21
ipl = interrupts_disable();
spinlock_lock(&slab_cache_lock);
printf("slab name size pages obj/pg slabs cached allocated ctl\n");
printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");
printf("slab name size pages obj/pg slabs cached allocated"
" ctl\n");
printf("---------------- -------- ------ ------ ------ ------ ---------"
" ---\n");
for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
for (cur = slab_cache_list.next; cur != &slab_cache_list;
cur = cur->next) {
cache = list_get_instance(cur, slab_cache_t, link);
printf("%-16s %8zd %6zd %6zd %6zd %6zd %9zd %-3s\n", cache->name, cache->size, (1 << cache->order), cache->objects, atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs), atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
cache->name, cache->size, (1 << cache->order),
cache->objects, atomic_get(&cache->allocated_slabs),
atomic_get(&cache->cached_objs),
atomic_get(&cache->allocated_objs),
cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
}
spinlock_unlock(&slab_cache_lock);
interrupts_restore(ipl);
824,32 → 825,24
int i, size;
 
/* Initialize magazine cache */
_slab_cache_create(&mag_cache,
"slab_magazine",
sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
sizeof(uintptr_t),
NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
_slab_cache_create(&mag_cache, "slab_magazine",
sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
SLAB_CACHE_SLINSIDE);
/* Initialize slab_cache cache */
_slab_cache_create(&slab_cache_cache,
"slab_cache",
sizeof(slab_cache_cache),
sizeof(uintptr_t),
NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
_slab_cache_create(&slab_cache_cache, "slab_cache",
sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
/* Initialize external slab cache */
slab_extern_cache = slab_cache_create("slab_extern",
sizeof(slab_t),
0, NULL, NULL,
SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
/* Initialize structures for malloc */
for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
i++, size <<= 1) {
malloc_caches[i] = slab_cache_create(malloc_names[i],
size, 0,
NULL,NULL, SLAB_CACHE_MAGDEFERRED);
for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
i++, size <<= 1) {
malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
NULL, NULL, SLAB_CACHE_MAGDEFERRED);
}
#ifdef CONFIG_DEBUG
_slab_initialized = 1;
874,9 → 867,11
 
spinlock_lock(&slab_cache_lock);
for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
for (cur = slab_cache_list.next; cur != &slab_cache_list;
cur = cur->next){
s = list_get_instance(cur, slab_cache_t, link);
if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
SLAB_CACHE_MAGDEFERRED)
continue;
make_magcache(s);
s->flags &= ~SLAB_CACHE_MAGDEFERRED;
887,7 → 882,7
 
/**************************************/
/* kalloc/kfree functions */
void * malloc(unsigned int size, int flags)
void *malloc(unsigned int size, int flags)
{
ASSERT(_slab_initialized);
ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));
900,7 → 895,7
return slab_alloc(malloc_caches[idx], flags);
}
 
void * realloc(void *ptr, unsigned int size, int flags)
void *realloc(void *ptr, unsigned int size, int flags)
{
ASSERT(_slab_initialized);
ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
/branches/tracing/kernel/generic/src/mm/backend_anon.c
113,7 → 113,7
}
if (allocate) {
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
/*
144,7 → 144,7
* the different causes
*/
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
}
/branches/tracing/kernel/generic/src/mm/as.c
324,8 → 324,7
if (backend_data)
a->backend_data = *backend_data;
else
memsetb((uintptr_t) &a->backend_data, sizeof(a->backend_data),
0);
memsetb(&a->backend_data, sizeof(a->backend_data), 0);
 
btree_create(&a->used_space);
453,10 → 452,8
cond = false; /* we are almost done */
i = (start_free - b) >> PAGE_WIDTH;
if (!used_space_remove(area, start_free,
c - i))
panic("Could not remove used "
"space.\n");
if (!used_space_remove(area, start_free, c - i))
panic("Could not remove used space.\n");
} else {
/*
* The interval of used space can be
463,8 → 460,7
* completely removed.
*/
if (!used_space_remove(area, b, c))
panic("Could not remove used "
"space.\n");
panic("Could not remove used space.\n");
}
for (; i < c; i++) {
1728,7 → 1724,7
}
}
 
panic("Inconsistency detected while adding %d pages of used space at "
panic("Inconsistency detected while adding %" PRIc " pages of used space at "
"%p.\n", count, page);
}
 
1907,7 → 1903,7
}
 
error:
panic("Inconsistency detected while removing %d pages of used space "
panic("Inconsistency detected while removing %" PRIc " pages of used space "
"from %p.\n", count, page);
}
 
2000,9 → 1996,9
as_area_t *area = node->value[i];
mutex_lock(&area->lock);
printf("as_area: %p, base=%p, pages=%d (%p - %p)\n",
printf("as_area: %p, base=%p, pages=%" PRIc " (%p - %p)\n",
area, area->base, area->pages, area->base,
area->base + area->pages*PAGE_SIZE);
area->base + FRAMES2SIZE(area->pages));
mutex_unlock(&area->lock);
}
}
/branches/tracing/kernel/generic/src/mm/buddy.c
44,6 → 44,7
#include <arch/types.h>
#include <debug.h>
#include <print.h>
#include <macros.h>
 
/** Return size needed for the buddy configuration data */
size_t buddy_conf_size(int max_order)
289,13 → 290,13
void buddy_system_structure_print(buddy_system_t *b, size_t elem_size) {
index_t i;
count_t cnt, elem_count = 0, block_count = 0;
link_t * cur;
link_t *cur;
 
printf("Order\tBlocks\tSize \tBlock size\tElems per block\n");
printf("-----\t------\t--------\t----------\t---------------\n");
for (i=0;i <= b->max_order; i++) {
for (i = 0;i <= b->max_order; i++) {
cnt = 0;
if (!list_empty(&b->order[i])) {
for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next)
302,7 → 303,8
cnt++;
}
printf("#%zd\t%5zd\t%7zdK\t%8zdK\t%6zd\t", i, cnt, (cnt * (1 << i) * elem_size) >> 10, ((1 << i) * elem_size) >> 10, 1 << i);
printf("#%" PRIi "\t%5" PRIc "\t%7" PRIc "K\t%8" PRIi "K\t%6u\t",
i, cnt, SIZE2KB(cnt * (1 << i) * elem_size), SIZE2KB((1 << i) * elem_size), 1 << i);
if (!list_empty(&b->order[i])) {
for (cur = b->order[i].next; cur != &b->order[i]; cur = cur->next) {
b->op->print_id(b, cur);
315,8 → 317,7
elem_count += (1 << i) * cnt;
}
printf("-----\t------\t--------\t----------\t---------------\n");
printf("Buddy system contains %zd free elements (%zd blocks)\n" , elem_count, block_count);
 
printf("Buddy system contains %" PRIc " free elements (%" PRIc " blocks)\n" , elem_count, block_count);
}
 
/** @}
/branches/tracing/kernel/generic/src/mm/frame.c
318,7 → 318,7
frame = list_get_instance(block, frame_t, buddy_link);
zone = (zone_t *) b->data;
index = frame_index(zone, frame);
printf("%zd", index);
printf("%" PRIi, index);
}
 
/** Buddy system find_buddy implementation
844,7 → 844,7
*/
uintptr_t zone_conf_size(count_t count)
{
int size = sizeof(zone_t) + count*sizeof(frame_t);
int size = sizeof(zone_t) + count * sizeof(frame_t);
int max_order;
 
max_order = fnzb(count);
1159,26 → 1159,31
 
ipl = interrupts_disable();
spinlock_lock(&zones.lock);
 
#ifdef __32_BITS__
printf("# base address free frames busy frames\n");
printf("-- ------------ ------------ ------------\n");
#endif
 
#ifdef __64_BITS__
printf("# base address free frames busy frames\n");
printf("-- -------------------- ------------ ------------\n");
#endif
if (sizeof(void *) == 4) {
printf("# base address free frames busy frames\n");
printf("-- ------------ ------------ ------------\n");
} else {
printf("# base address free frames busy frames\n");
printf("-- -------------------- ------------ ------------\n");
}
for (i = 0; i < zones.count; i++) {
zone = zones.info[i];
spinlock_lock(&zone->lock);
 
#ifdef __32_BITS__
printf("%-2u %10p %12" PRIc " %12" PRIc "\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
#endif
 
#ifdef __64_BITS__
printf("%-2u %18p %12" PRIc " %12" PRIc "\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
#endif
if (sizeof(void *) == 4)
printf("%-2d %#10zx %12zd %12zd\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
else
printf("%-2d %#18zx %12zd %12zd\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
spinlock_unlock(&zone->lock);
}
1211,13 → 1216,12
spinlock_lock(&zone->lock);
printf("Memory zone information\n");
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2,
PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zd KB)\n", zone->count,
printf("Zone base address: %p\n", PFN2ADDR(zone->base));
printf("Zone size: %" PRIc " frames (%" PRIs " KB)\n", zone->count,
SIZE2KB(FRAMES2SIZE(zone->count)));
printf("Allocated space: %zd frames (%zd KB)\n", zone->busy_count,
printf("Allocated space: %" PRIc " frames (%" PRIs " KB)\n", zone->busy_count,
SIZE2KB(FRAMES2SIZE(zone->busy_count)));
printf("Available space: %zd frames (%zd KB)\n", zone->free_count,
printf("Available space: %" PRIc " frames (%" PRIs " KB)\n", zone->free_count,
SIZE2KB(FRAMES2SIZE(zone->free_count)));
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
spinlock_unlock(&zone->lock);
/branches/tracing/kernel/generic/src/mm/page.c
40,11 → 40,28
 * They, however, define the single interface.
*/
 
/*
* Note on memory prefetching and updating memory mappings, also described in:
* AMD x86-64 Architecture Programmer's Manual, Volume 2, System Programming,
* 7.2.1 Special Coherency Considerations.
*
* The processor which modifies a page table mapping can access prefetched data
* from the old mapping. In order to prevent this, we place a memory barrier
* after a mapping is updated.
*
* We assume that the other processors are either not using the mapping yet
* (i.e. during the bootstrap) or are executing the TLB shootdown code. While
* we don't care much about the former case, the processors in the latter case
* will do an implicit serialization by virtue of running the TLB shootdown
* interrupt handler.
*/
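A minimal sketch of the pattern described above, using the interfaces defined later in this file (the barrier is in fact already issued inside page_mapping_insert(); the sketch only spells out the ordering):

	/* Establish the new translation. */
	page_mapping_insert(AS_KERNEL, page, frame, PAGE_NOT_CACHEABLE | PAGE_WRITE);
	/* Repel accesses prefetched through the old mapping. */
	memory_barrier();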
 
#include <mm/page.h>
#include <arch/mm/page.h>
#include <arch/mm/asid.h>
#include <mm/as.h>
#include <mm/frame.h>
#include <arch/barrier.h>
#include <arch/types.h>
#include <arch/asm.h>
#include <memstr.h>
65,8 → 82,8
* considering possible crossings
* of page boundaries.
*
* @param s Address of the structure.
* @param size Size of the structure.
* @param s Address of the structure.
* @param size Size of the structure.
*/
void map_structure(uintptr_t s, size_t size)
{
76,8 → 93,11
cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);
 
for (i = 0; i < cnt; i++)
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE,
s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);
 
/* Repel prefetched accesses to the old mapping. */
memory_barrier();
}
 
/** Insert mapping of page to frame.
87,10 → 107,11
*
* The page table must be locked and interrupts must be disabled.
*
 * @param as Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is done.
* @param flags Flags to be used for mapping.
 * @param as		Address space to which page belongs.
* @param page Virtual address of the page to be mapped.
* @param frame Physical address of memory frame to which the mapping is
* done.
* @param flags Flags to be used for mapping.
*/
void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
98,6 → 119,9
ASSERT(page_mapping_operations->mapping_insert);
page_mapping_operations->mapping_insert(as, page, frame, flags);
/* Repel prefetched accesses to the old mapping. */
memory_barrier();
}
 
/** Remove mapping of page.
108,8 → 132,8
*
* The page table must be locked and interrupts must be disabled.
*
 * @param as Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
 * @param as		Address space to which page belongs.
* @param page Virtual address of the page to be demapped.
*/
void page_mapping_remove(as_t *as, uintptr_t page)
{
117,6 → 141,9
ASSERT(page_mapping_operations->mapping_remove);
page_mapping_operations->mapping_remove(as, page);
 
/* Repel prefetched accesses to the old mapping. */
memory_barrier();
}
 
/** Find mapping for virtual page
125,10 → 152,11
*
* The page table must be locked and interrupts must be disabled.
*
 * @param as Address space to which page belongs.
* @param page Virtual page.
 * @param as		Address space to which page belongs.
* @param page Virtual page.
*
* @return NULL if there is no such mapping; requested mapping otherwise.
* @return NULL if there is no such mapping; requested mapping
* otherwise.
*/
pte_t *page_mapping_find(as_t *as, uintptr_t page)
{
/branches/tracing/kernel/generic/src/mm/backend_elf.c
48,6 → 48,7
#include <memstr.h>
#include <macros.h>
#include <arch.h>
#include <arch/barrier.h>
 
#ifdef CONFIG_VIRT_IDX_DCACHE
#include <arch/mm/cache.h>
67,12 → 68,13
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
* @param area Pointer to the address space area.
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e.
* read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK
* on success (i.e. serviced).
*/
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
150,6 → 152,10
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memcpy((void *) PA2KA(frame),
(void *) (base + i * FRAME_SIZE), FRAME_SIZE);
if (entry->p_flags & PF_X) {
smc_coherence_block((void *) PA2KA(frame),
FRAME_SIZE);
}
dirty = true;
} else {
frame = KA2PA(base + i * FRAME_SIZE);
162,7 → 168,7
* and cleared.
*/
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
memsetb((void *) PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
} else {
size_t pad_lo, pad_hi;
187,8 → 193,13
memcpy((void *) (PA2KA(frame) + pad_lo),
(void *) (base + i * FRAME_SIZE + pad_lo),
FRAME_SIZE - pad_lo - pad_hi);
memsetb(PA2KA(frame), pad_lo, 0);
memsetb(PA2KA(frame) + FRAME_SIZE - pad_hi, pad_hi, 0);
if (entry->p_flags & PF_X) {
smc_coherence_block((void *) (PA2KA(frame) + pad_lo),
FRAME_SIZE - pad_lo - pad_hi);
}
memsetb((void *) PA2KA(frame), pad_lo, 0);
memsetb((void *) (PA2KA(frame) + FRAME_SIZE - pad_hi), pad_hi,
0);
dirty = true;
}
 
212,9 → 223,10
*
* The address space area and page tables must be already locked.
*
* @param area Pointer to the address space area.
* @param page Page that is mapped to frame. Must be aligned to PAGE_SIZE.
* @param frame Frame to be released.
* @param area Pointer to the address space area.
* @param page Page that is mapped to frame. Must be aligned to
* PAGE_SIZE.
* @param frame Frame to be released.
*
*/
void elf_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
257,7 → 269,7
*
* The address space and address space area must be locked prior to the call.
*
* @param area Address space area.
* @param area Address space area.
*/
void elf_share(as_area_t *area)
{
/branches/tracing/kernel/generic/src/syscall/syscall.c
51,15 → 51,14
#include <syscall/copy.h>
#include <sysinfo/sysinfo.h>
#include <console/console.h>
#include <console/klog.h>
#include <udebug/udebug.h>
 
/** Print using kernel facility
*
* Some simulators can print only through kernel. Userspace can use
* this syscall to facilitate it.
* Print to kernel log.
*
*/
static unative_t sys_io(int fd, const void * buf, size_t count)
static unative_t sys_klog(int fd, const void * buf, size_t count)
{
size_t i;
char *data;
67,20 → 66,23
 
if (count > PAGE_SIZE)
return ELIMIT;
 
data = (char *) malloc(count, 0);
if (!data)
return ENOMEM;
rc = copy_from_uspace(data, buf, count);
if (rc) {
if (count > 0) {
data = (char *) malloc(count, 0);
if (!data)
return ENOMEM;
rc = copy_from_uspace(data, buf, count);
if (rc) {
free(data);
return rc;
}
for (i = 0; i < count; i++)
putchar(data[i]);
free(data);
return rc;
}
 
for (i = 0; i < count; i++)
putchar(data[i]);
free(data);
} else
klog_update();
return count;
}
106,8 → 108,7
udebug_stoppable_begin();
rc = syscall_table[id](a1, a2, a3, a4, a5, a6);
} else {
klog_printf("TASK %llu: Unknown syscall id %llx", TASK->taskid,
id);
printf("Task %" PRIu64": Unknown syscall %#" PRIxn, TASK->taskid, id);
task_kill(TASK->taskid);
thread_exit();
}
123,7 → 124,7
}
 
syshandler_t syscall_table[SYSCALL_END] = {
(syshandler_t) sys_io,
(syshandler_t) sys_klog,
(syshandler_t) sys_tls_set,
/* Thread and task related syscalls. */
130,7 → 131,9
(syshandler_t) sys_thread_create,
(syshandler_t) sys_thread_exit,
(syshandler_t) sys_thread_get_id,
(syshandler_t) sys_task_get_id,
(syshandler_t) sys_task_spawn,
/* Synchronization related syscalls. */
(syshandler_t) sys_futex_sleep_timeout,
/branches/tracing/kernel/generic/src/ipc/sysipc.c
437,7 → 437,7
phone_t *phone;
int res;
int rc;
 
GET_CHECK_PHONE(phone, phoneid, return ENOENT);
 
ipc_call_static_init(&call);
637,7 → 637,7
IPC_SET_RETVAL(call->data, EFORWARD);
ipc_answer(&TASK->answerbox, call);
return ENOENT;
});
});
 
if (!method_is_forwardable(IPC_GET_METHOD(call->data))) {
IPC_SET_RETVAL(call->data, EFORWARD);
875,7 → 875,7
return 0;
}
 
#include <console/klog.h>
#include <console/console.h>
 
/**
* Syscall connect to a task by id.
891,7 → 891,7
if (rc != 0)
return (unative_t) rc;
 
klog_printf("sys_ipc_connect_kbox(%lld, %d)", taskid_arg.value);
printf("sys_ipc_connect_kbox(%lld, %d)\n", taskid_arg.value);
 
return ipc_connect_kbox(taskid_arg.value);
}
/branches/tracing/kernel/generic/src/ipc/ipc.c
51,7 → 51,7
#include <debug.h>
 
#include <print.h>
#include <console/klog.h>
#include <console/console.h>
#include <proc/thread.h>
#include <arch/interrupt.h>
#include <ipc/irq.h>
67,7 → 67,7
*/
static void _ipc_call_init(call_t *call)
{
memsetb((uintptr_t) call, sizeof(*call), 0);
memsetb(call, sizeof(*call), 0);
call->callerbox = &TASK->answerbox;
call->sender = TASK;
call->buffer = NULL;
516,10 → 516,10
ipc_answerbox_slam_phones(&TASK->kernel_box, have_kb_thread);
if (have_kb_thread) {
klog_printf("join kb_thread..");
printf("join kb_thread..\n");
thread_join(TASK->kb_thread);
thread_detach(TASK->kb_thread);
klog_printf("join done");
printf("join done\n");
TASK->kb_thread = NULL;
}
 
652,7 → 652,7
default:
break;
}
printf("active: %d\n",
printf("active: %ld\n",
atomic_get(&task->phones[i].active_calls));
}
mutex_unlock(&task->phones[i].lock);
665,8 → 665,9
for (tmp = task->answerbox.calls.next; tmp != &task->answerbox.calls;
tmp = tmp->next) {
call = list_get_instance(tmp, call_t, link);
printf("Callid: %p Srctask:%llu M:%d A1:%d A2:%d A3:%d "
"A4:%d A5:%d Flags:%x\n", call, call->sender->taskid,
printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun
" A1:%" PRIun " A2:%" PRIun " A3:%" PRIun
" A4:%" PRIun " A5:%" PRIun " Flags:%x\n", call, call->sender->taskid,
IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data),
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data),
IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data),
674,12 → 675,13
}
/* Print answerbox - calls */
printf("ABOX - DISPATCHED CALLS:\n");
for (tmp = task->answerbox.dispatched_calls.next;
tmp != &task->answerbox.dispatched_calls;
tmp = tmp->next) {
for (tmp = task->answerbox.dispatched_calls.next;
tmp != &task->answerbox.dispatched_calls;
tmp = tmp->next) {
call = list_get_instance(tmp, call_t, link);
printf("Callid: %p Srctask:%llu M:%d A1:%d A2:%d A3:%d "
"A4:%d A5:%d Flags:%x\n", call, call->sender->taskid,
printf("Callid: %p Srctask:%" PRIu64 " M:%" PRIun
" A1:%" PRIun " A2:%" PRIun " A3:%" PRIun
" A4:%" PRIun " A5:%" PRIun " Flags:%x\n", call, call->sender->taskid,
IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data),
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data),
IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data),
690,7 → 692,8
for (tmp = task->answerbox.answers.next; tmp != &task->answerbox.answers;
tmp = tmp->next) {
call = list_get_instance(tmp, call_t, link);
printf("Callid:%p M:%d A1:%d A2:%d A3:%d A4:%d A5:%d Flags:%x\n",
printf("Callid:%p M:%" PRIun " A1:%" PRIun " A2:%" PRIun
" A3:%" PRIun " A4:%" PRIun " A5:%" PRIun " Flags:%x\n",
call, IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data),
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data),
IPC_GET_ARG4(call->data), IPC_GET_ARG5(call->data),
702,7 → 705,7
}
 
#include <ipc/ipcrsc.h>
#include <console/klog.h>
#include <print.h>
#include <udebug/udebug_ipc.h>
 
static void kbox_thread_proc(void *arg)
713,11 → 716,11
ipl_t ipl;
 
(void)arg;
klog_printf("kbox_thread_proc()");
printf("kbox_thread_proc()\n");
done = false;
 
while (!done) {
//klog_printf("kbox: wait for call");
//printf("kbox: wait for call\n");
call = ipc_wait_for_call(&TASK->kernel_box, SYNCH_NO_TIMEOUT,
SYNCH_FLAGS_NONE);
 
729,12 → 732,12
}
 
if (method == IPC_M_PHONE_HUNGUP) {
klog_printf("kbox: handle hangup message");
printf("kbox: handle hangup message\n");
 
/* Was it our debugger, who hung up? */
if (call->sender == TASK->udebug.debugger) {
/* Terminate debugging session (if any) */
klog_printf("kbox: terminate debug session");
printf("kbox: terminate debug session\n");
ipl = interrupts_disable();
spinlock_lock(&TASK->lock);
udebug_task_cleanup(TASK);
741,10 → 744,10
spinlock_unlock(&TASK->lock);
interrupts_restore(ipl);
} else {
klog_printf("kbox: was not debugger");
printf("kbox: was not debugger\n");
}
 
klog_printf("kbox: continue with hangup message");
printf("kbox: continue with hangup message\n");
IPC_SET_RETVAL(call->data, 0);
ipc_answer(&TASK->kernel_box, call);
 
755,7 → 758,7
/* Last phone has been disconnected */
TASK->kb_thread = NULL;
done = true;
klog_printf("phone list is empty");
printf("phone list is empty\n");
}
spinlock_unlock(&TASK->answerbox.lock);
spinlock_unlock(&TASK->lock);
764,7 → 767,7
}
}
 
klog_printf("kbox: finished");
printf("kbox: finished\n");
}
 
 
/branches/tracing/kernel/generic/src/udebug/udebug_ipc.c
35,7 → 35,7
* @brief Udebug IPC message handling.
*/
#include <console/klog.h>
#include <print.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch.h>
53,7 → 53,7
int rc;
void *buffer;
 
klog_printf("debug_regs_write()");
printf("debug_regs_write()\n");
 
uspace_data = (void *)IPC_GET_ARG3(call->data);
to_copy = sizeof(istate_t);
61,13 → 61,13
 
rc = copy_from_uspace(buffer, uspace_data, to_copy);
if (rc != 0) {
klog_printf("debug_regs_write() - copy failed");
printf("debug_regs_write() - copy failed\n");
return rc;
}
 
call->buffer = buffer;
 
klog_printf(" - done");
printf(" - done\n");
return 0;
}
 
78,7 → 78,7
int rc;
void *buffer;
 
klog_printf("udebug_rp_mem_write()");
printf("udebug_rp_mem_write()\n");
 
uspace_data = (void *)IPC_GET_ARG2(call->data);
to_copy = IPC_GET_ARG4(call->data);
87,13 → 87,13
 
rc = copy_from_uspace(buffer, uspace_data, to_copy);
if (rc != 0) {
klog_printf(" - copy failed");
printf(" - copy failed\n");
return rc;
}
 
call->buffer = buffer;
 
klog_printf(" - done");
printf(" - done\n");
return 0;
}
 
161,7 → 161,7
thread_t *t;
int rc;
 
//klog_printf("debug_go()");
//printf("debug_go()\n");
 
t = (thread_t *)IPC_GET_ARG2(call->data);
 
178,7 → 178,7
thread_t *t;
int rc;
 
klog_printf("debug_stop()");
printf("debug_stop()\n");
 
t = (thread_t *)IPC_GET_ARG2(call->data);
 
278,7 → 278,7
void *buffer;
int rc;
 
klog_printf("debug_regs_read()");
printf("debug_regs_read()\n");
 
t = (thread_t *) IPC_GET_ARG2(call->data);
buffer = malloc(sizeof(istate_t), 0);
371,7 → 371,7
unsigned size;
int rc;
 
klog_printf("udebug_receive_mem_write()");
printf("udebug_receive_mem_write()\n");
 
uspace_dst = IPC_GET_ARG3(call->data);
size = IPC_GET_ARG4(call->data);
/branches/tracing/kernel/generic/src/udebug/udebug.c
48,7 → 48,7
*/
#include <synch/waitq.h>
#include <console/klog.h>
#include <print.h>
#include <udebug/udebug.h>
#include <errno.h>
#include <arch.h>
288,7 → 288,7
return;
}
 
//klog_printf("udebug_syscall_event");
//printf("udebug_syscall_event\n");
call = THREAD->udebug.go_call;
THREAD->udebug.go_call = NULL;
 
296,7 → 296,7
IPC_SET_ARG1(call->data, etype);
IPC_SET_ARG2(call->data, id);
IPC_SET_ARG3(call->data, rc);
//klog_printf("udebug_syscall_event/ipc_answer");
//printf("udebug_syscall_event/ipc_answer\n");
 
THREAD->udebug.syscall_args[0] = a1;
THREAD->udebug.syscall_args[1] = a2;
332,12 → 332,12
mutex_lock(&TASK->udebug.lock);
mutex_lock(&THREAD->udebug.lock);
 
klog_printf("udebug_thread_b_event");
klog_printf("- check state");
printf("udebug_thread_b_event\n");
printf("- check state\n");
 
/* Must only generate events when in debugging session */
if (THREAD->udebug.debug_active != true) {
klog_printf("- debug_active: %s, udebug.stop: %s",
printf("- debug_active: %s, udebug.stop: %s\n",
THREAD->udebug.debug_active ? "yes(+)" : "no(-)",
THREAD->udebug.stop ? "yes(-)" : "no(+)");
mutex_unlock(&THREAD->udebug.lock);
345,7 → 345,7
return;
}
 
klog_printf("- trigger event");
printf("- trigger event\n");
 
call = THREAD->udebug.go_call;
THREAD->udebug.go_call = NULL;
366,7 → 366,7
mutex_unlock(&THREAD->udebug.lock);
mutex_unlock(&TASK->udebug.lock);
 
klog_printf("- sleep");
printf("- sleep\n");
udebug_wait_for_go(&THREAD->udebug.go_wq);
 
udebug_int_unlock();
381,12 → 381,12
mutex_lock(&TASK->udebug.lock);
mutex_lock(&THREAD->udebug.lock);
 
klog_printf("udebug_thread_e_event");
klog_printf("- check state");
printf("udebug_thread_e_event\n");
printf("- check state\n");
 
/* Must only generate events when in debugging session */
if (THREAD->udebug.debug_active != true) {
klog_printf("- debug_active: %s, udebug.stop: %s",
printf("- debug_active: %s, udebug.stop: %s\n",
THREAD->udebug.debug_active ? "yes(+)" : "no(-)",
THREAD->udebug.stop ? "yes(-)" : "no(+)");
mutex_unlock(&THREAD->udebug.lock);
394,7 → 394,7
return;
}
 
klog_printf("- trigger event");
printf("- trigger event\n");
 
call = THREAD->udebug.go_call;
THREAD->udebug.go_call = NULL;
441,7 → 441,7
return;
}
 
klog_printf("udebug_breakpoint/trap_event");
printf("udebug_breakpoint/trap_event\n");
call = THREAD->udebug.go_call;
THREAD->udebug.go_call = NULL;
 
457,7 → 457,7
THREAD->udebug.stop = true;
THREAD->udebug.cur_event = etype;
 
klog_printf("- send answer");
printf("- send answer\n");
ipc_answer(&TASK->answerbox, call);
 
mutex_unlock(&THREAD->udebug.lock);
491,14 → 491,14
int flags;
ipl_t ipl;
 
klog_printf("udebug_task_cleanup()");
klog_printf("task %llu", ta->taskid);
printf("udebug_task_cleanup()\n");
printf("task %llu\n", ta->taskid);
 
udebug_int_lock();
 
if (ta->udebug.dt_state != UDEBUG_TS_BEGINNING &&
ta->udebug.dt_state != UDEBUG_TS_ACTIVE) {
klog_printf("udebug_task_cleanup(): task not being debugged");
printf("udebug_task_cleanup(): task not being debugged\n");
return EINVAL;
}
 
531,7 → 531,7
t->udebug.stop = true;
 
/* Answer GO call */
klog_printf("answer GO call with EVENT_FINISHED");
printf("answer GO call with EVENT_FINISHED\n");
IPC_SET_RETVAL(t->udebug.go_call->data, 0);
IPC_SET_ARG1(t->udebug.go_call->data, UDEBUG_EVENT_FINISHED);
 
/branches/tracing/kernel/generic/src/udebug/udebug_ops.c
35,7 → 35,7
* @brief Udebug operations.
*/
#include <console/klog.h>
#include <print.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch.h>
162,14 → 162,14
thread_t *t;
link_t *cur;
 
klog_printf("udebug_begin()");
printf("udebug_begin()\n");
 
mutex_lock(&TASK->udebug.lock);
klog_printf("debugging task %llu", TASK->taskid);
printf("debugging task %llu\n", TASK->taskid);
 
if (TASK->udebug.dt_state != UDEBUG_TS_INACTIVE) {
mutex_unlock(&TASK->udebug.lock);
klog_printf("udebug_begin(): busy error");
printf("udebug_begin(): busy error\n");
 
return EBUSY;
}
199,7 → 199,7
 
mutex_unlock(&TASK->udebug.lock);
 
klog_printf("udebug_begin() done (%s)",
printf("udebug_begin() done (%s)\n",
reply ? "reply" : "stoppability wait");
 
return reply;
209,10 → 209,10
{
int rc;
 
klog_printf("udebug_end()");
printf("udebug_end()\n");
 
mutex_lock(&TASK->udebug.lock);
klog_printf("task %llu", TASK->taskid);
printf("task %llu\n", TASK->taskid);
 
rc = udebug_task_cleanup(TASK);
 
223,15 → 223,15
 
int udebug_set_evmask(udebug_evmask_t mask)
{
klog_printf("udebug_set_mask()");
printf("udebug_set_mask()\n");
 
klog_printf("debugging task %llu", TASK->taskid);
printf("debugging task %llu\n", TASK->taskid);
 
mutex_lock(&TASK->udebug.lock);
 
if (TASK->udebug.dt_state != UDEBUG_TS_ACTIVE) {
mutex_unlock(&TASK->udebug.lock);
klog_printf("udebug_set_mask(): not active debuging session");
printf("udebug_set_mask(): not active debuging session\n");
 
return EINVAL;
}
248,7 → 248,7
{
int rc;
 
// klog_printf("udebug_go()");
// printf("udebug_go()\n");
 
/* On success, this will lock t->udebug.lock */
rc = _thread_op_begin(t, false);
274,7 → 274,7
{
int rc;
 
klog_printf("udebug_stop()");
printf("udebug_stop()\n");
mutex_lock(&TASK->udebug.lock);
 
/*
298,7 → 298,7
/*
* Answer GO call
*/
klog_printf("udebug_stop - answering go call");
printf("udebug_stop - answering go call\n");
 
/* Make sure nobody takes this call away from us */
call = t->udebug.go_call;
306,7 → 306,7
 
IPC_SET_RETVAL(call->data, 0);
IPC_SET_ARG1(call->data, UDEBUG_EVENT_STOP);
klog_printf("udebug_stop/ipc_answer");
printf("udebug_stop/ipc_answer\n");
 
THREAD->udebug.cur_event = UDEBUG_EVENT_STOP;
 
315,7 → 315,7
ipc_answer(&TASK->answerbox, call);
mutex_unlock(&TASK->udebug.lock);
 
klog_printf("udebog_stop/done");
printf("udebog_stop/done\n");
return 0;
}
 
330,7 → 330,7
int flags;
size_t max_ids;
 
klog_printf("udebug_thread_read()");
printf("udebug_thread_read()\n");
 
/* Allocate a buffer to hold thread IDs */
id_buffer = malloc(buf_size, 0);
385,7 → 385,7
int rc;
unative_t *arg_buffer;
 
// klog_printf("udebug_args_read()");
// printf("udebug_args_read()\n");
 
/* Prepare a buffer to hold the arguments */
arg_buffer = malloc(6 * sizeof(unative_t), 0);
417,7 → 417,7
istate_t *state;
int rc;
 
// klog_printf("udebug_regs_read()");
// printf("udebug_regs_read()\n");
 
/* On success, this will lock t->udebug.lock */
rc = _thread_op_begin(t, false);
428,7 → 428,7
state = t->udebug.uspace_state;
if (state == NULL) {
_thread_op_end(t);
klog_printf("udebug_regs_read() - istate not available");
printf("udebug_regs_read() - istate not available\n");
return EBUSY;
}
 
445,7 → 445,7
int rc;
istate_t *state;
 
klog_printf("udebug_regs_write()");
printf("udebug_regs_write()\n");
 
/* Try to change the thread's uspace_state */
 
452,7 → 452,7
/* On success, this will lock t->udebug.lock */
rc = _thread_op_begin(t, false);
if (rc != EOK) {
klog_printf("error locking thread");
printf("error locking thread\n");
return rc;
}
 
459,7 → 459,7
state = t->udebug.uspace_state;
if (state == NULL) {
_thread_op_end(t);
klog_printf("udebug_regs_write() - istate not available");
printf("udebug_regs_write() - istate not available\n");
return EBUSY;
}
 
486,7 → 486,7
 
data_buffer = malloc(n, 0);
 
// klog_printf("udebug_mem_read: src=%u, size=%u", uspace_addr, n);
// printf("udebug_mem_read: src=%u, size=%u\n", uspace_addr, n);
 
/* NOTE: this is not strictly from a syscall... but that shouldn't
* be a problem */
503,7 → 503,7
{
int rc;
 
klog_printf("udebug_mem_write()");
printf("udebug_mem_write()\n");
 
/* n must be positive */
if (n < 1)
517,7 → 517,7
return EBUSY;
}
klog_printf("dst=%u, size=%u", uspace_addr, n);
printf("dst=%u, size=%u\n", uspace_addr, n);
 
/* NOTE: this is not strictly from a syscall... but that shouldn't
* be a problem */
526,7 → 526,7
 
rc = as_debug_write(uspace_addr, data, n);
klog_printf("rc=%d\n", rc);
printf("rc=%d\n", rc);
 
mutex_unlock(&TASK->udebug.lock);
 
/branches/tracing/kernel/Makefile
43,11 → 43,11
-DKERNEL
 
GCC_CFLAGS = -I$(INCLUDES) -O$(OPTIMIZATION) \
-fno-builtin -fomit-frame-pointer -Wall -Wextra -Wno-unused-parameter -Wmissing-prototypes -Werror \
-fno-builtin -Wall -Wextra -Wno-unused-parameter -Wmissing-prototypes -Werror \
-nostdlib -nostdinc
 
ICC_CFLAGS = -I$(INCLUDES) -O$(OPTIMIZATION) \
-fno-builtin -fomit-frame-pointer -Wall -Wmissing-prototypes -Werror \
-fno-builtin -Wall -Wmissing-prototypes -Werror \
-nostdlib -nostdinc \
-wd170
 
90,6 → 90,10
DEFS += -DCONFIG_DEBUG
endif
 
ifeq ($(CONFIG_EDEBUG),y)
DEFS += -DCONFIG_EDEBUG
endif
 
ifeq ($(CONFIG_DEBUG_SPINLOCK),y)
DEFS += -DCONFIG_DEBUG_SPINLOCK
endif
217,7 → 221,6
generic/src/console/chardev.c \
generic/src/console/console.c \
generic/src/console/kconsole.c \
generic/src/console/klog.c \
generic/src/console/cmd.c \
generic/src/cpu/cpu.c \
generic/src/ddi/ddi.c \
393,5 → 396,15
%.o: %.s
$(AS) $(AFLAGS) $< -o $@
 
#
# The FPU tests are the only objects for which we allow the compiler to generate
# FPU instructions.
#
test/fpu/%.o: test/fpu/%.c
$(CC) $(DEFS) $(CFLAGS) $(EXTRA_FLAGS) -c $< -o $@
 
#
# Ordinary objects.
#
%.o: %.c
$(CC) $(DEFS) $(CFLAGS) $(EXTRA_FLAGS) -c $< -o $@
$(CC) $(DEFS) $(CFLAGS) $(EXTRA_FLAGS) $(FPU_NO_CFLAGS) -c $< -o $@
/branches/tracing/kernel/arch/sparc64/include/types.h
35,10 → 35,6
#ifndef KERN_sparc64_TYPES_H_
#define KERN_sparc64_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
61,13 → 57,31
typedef uint64_t unative_t;
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
/**< Formats for uintptr_t, size_t, count_t and index_t */
#define PRIp "llx"
#define PRIs "llu"
#define PRIc "llu"
#define PRIi "llu"
 
typedef int32_t inr_t;
typedef int32_t devno_t;
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */
#define PRId8 "d"
#define PRId16 "d"
#define PRId32 "d"
#define PRId64 "lld"
#define PRIdn "lld"
 
#define PRIu8 "u"
#define PRIu16 "u"
#define PRIu32 "u"
#define PRIu64 "llu"
#define PRIun "llu"
 
#define PRIx8 "x"
#define PRIx16 "x"
#define PRIx32 "x"
#define PRIx64 "llx"
#define PRIxn "llx"
 
typedef uint8_t asi_t;
 
#endif
/branches/tracing/kernel/arch/sparc64/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/tracing/kernel/arch/sparc64/include/asm.h
37,6 → 37,7
 
#include <arch/arch.h>
#include <arch/types.h>
#include <typedefs.h>
#include <align.h>
#include <arch/register.h>
#include <config.h>
/branches/tracing/kernel/arch/sparc64/include/mm/cache_spec.h
0,0 → 1,57
/*
* Copyright (c) 2008 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64mm
* @{
*/
/** @file
*/
 
#ifndef KERN_sparc64_CACHE_SPEC_H_
#define KERN_sparc64_CACHE_SPEC_H_
 
/*
* The following macros are valid for the following processors:
*
* UltraSPARC, UltraSPARC II, UltraSPARC IIi
*
* Should we ever support other UltraSPARC processors, we will need to make
* sure that these macros are defined correctly for them.
*/
 
#define DCACHE_SIZE (16 * 1024)
#define DCACHE_LINE_SIZE 32
 
#define ICACHE_SIZE (16 * 1024)
#define ICACHE_WAYS 2
#define ICACHE_LINE_SIZE 32
 
#endif
 
/** @}
*/
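A C-level sketch of how these constants are typically consumed (the real flush code lives in cache.S, which now includes this header); dcache_flush_line() is a hypothetical helper used only for illustration:
 
#include <arch/mm/cache_spec.h>
 
static inline void dcache_flush_sketch(void)
{
	unsigned int i;
 
	/* One pass per D-cache line: 16 KiB / 32 B = 512 iterations. */
	for (i = 0; i < DCACHE_SIZE / DCACHE_LINE_SIZE; i++)
		dcache_flush_line(i);	/* hypothetical per-line flush */
}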
/branches/tracing/kernel/arch/sparc64/include/mm/tlb.h
160,7 → 160,7
static inline void mmu_primary_context_write(uint64_t v)
{
asi_u64_write(ASI_DMMU, VA_PRIMARY_CONTEXT_REG, v);
flush();
flush_pipeline();
}
 
/** Read MMU Secondary Context Register.
179,7 → 179,7
static inline void mmu_secondary_context_write(uint64_t v)
{
asi_u64_write(ASI_DMMU, VA_SECONDARY_CONTEXT_REG, v);
flush();
flush_pipeline();
}
 
/** Read IMMU TLB Data Access Register.
209,7 → 209,7
reg.value = 0;
reg.tlb_entry = entry;
asi_u64_write(ASI_ITLB_DATA_ACCESS_REG, reg.value, value);
flush();
flush_pipeline();
}
 
/** Read DMMU TLB Data Access Register.
279,7 → 279,7
static inline void itlb_tag_access_write(uint64_t v)
{
asi_u64_write(ASI_IMMU, VA_IMMU_TAG_ACCESS, v);
flush();
flush_pipeline();
}
 
/** Read IMMU TLB Tag Access Register.
318,7 → 318,7
static inline void itlb_data_in_write(uint64_t v)
{
asi_u64_write(ASI_ITLB_DATA_IN_REG, 0, v);
flush();
flush_pipeline();
}
 
/** Write DMMU TLB Data in Register.
347,7 → 347,7
static inline void itlb_sfsr_write(uint64_t v)
{
asi_u64_write(ASI_IMMU, VA_IMMU_SFSR, v);
flush();
flush_pipeline();
}
 
/** Read DTLB Synchronous Fault Status Register.
400,7 → 400,7
asi_u64_write(ASI_IMMU_DEMAP, da.value, 0); /* da.value is the
* address within the
* ASI */
flush();
flush_pipeline();
}
 
/** Perform DMMU TLB Demap Operation.
/branches/tracing/kernel/arch/sparc64/include/barrier.h
57,8 → 57,11
#define write_barrier() \
asm volatile ("membar #StoreStore\n" ::: "memory")
 
/** Flush Instruction Memory instruction. */
static inline void flush(void)
#define flush(a) \
asm volatile ("flush %0\n" :: "r" ((a)) : "memory")
 
/** Flush Instruction pipeline. */
static inline void flush_pipeline(void)
{
/*
* The FLUSH instruction takes an address parameter.
79,6 → 82,21
asm volatile ("membar #Sync\n");
}
 
#define smc_coherence(a) \
{ \
write_barrier(); \
flush((a)); \
}
 
#define FLUSH_INVAL_MIN 4
#define smc_coherence_block(a, l) \
{ \
unsigned long i; \
write_barrier(); \
for (i = 0; i < (l); i += FLUSH_INVAL_MIN) \
flush((void *)(a) + i); \
}
 
#endif
 
/** @}
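These macros capture the self-modifying-code pattern used elsewhere in this revision (for example the mips32 debugger and the exception-vector copies below): after the kernel writes into instruction memory, the affected address or range is flushed so that instruction fetch sees the new code. A minimal sketch, with a hypothetical patch routine:
 
#include <arch/types.h>
#include <arch/barrier.h>
 
/* Hypothetical illustration: overwrite one instruction word and make the
 * change visible to the instruction fetch path. */
static void patch_instruction_word(uint32_t *addr, uint32_t opcode)
{
	*addr = opcode;		/* store the new instruction */
	smc_coherence(addr);	/* drain the store buffer, flush the stale copy */
}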
/branches/tracing/kernel/arch/sparc64/include/cpu.h
36,6 → 36,7
#define KERN_sparc64_CPU_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <arch/register.h>
#include <arch/asm.h>
 
/branches/tracing/kernel/arch/sparc64/src/smp/smp.c
99,7 → 99,7
waking_up_mid = mid;
if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT)
printf("%s: waiting for processor (mid = %d) timed out\n",
printf("%s: waiting for processor (mid = %" PRIu32 ") timed out\n",
__func__, mid);
}
}
/branches/tracing/kernel/arch/sparc64/src/trap/exception.c
45,9 → 45,9
 
void dump_istate(istate_t *istate)
{
printf("TSTATE=%#llx\n", istate->tstate);
printf("TPC=%#llx (%s)\n", istate->tpc, get_symtab_entry(istate->tpc));
printf("TNPC=%#llx (%s)\n", istate->tnpc, get_symtab_entry(istate->tnpc));
printf("TSTATE=%#" PRIx64 "\n", istate->tstate);
printf("TPC=%#" PRIx64 " (%s)\n", istate->tpc, get_symtab_entry(istate->tpc));
printf("TNPC=%#" PRIx64 " (%s)\n", istate->tnpc, get_symtab_entry(istate->tnpc));
}
 
/** Handle instruction_access_exception. (0x8) */
/branches/tracing/kernel/arch/sparc64/src/trap/interrupt.c
97,8 → 97,8
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (intrcv=%#llx, "
"data0=%#llx)\n", CPU->id, intrcv, data0);
printf("cpu%u: spurious interrupt (intrcv=%#" PRIx64
", data0=%#" PRIx64 ")\n", CPU->id, intrcv, data0);
#endif
}
 
/branches/tracing/kernel/arch/sparc64/src/cpu/cpu.c
135,7 → 135,7
break;
}
 
printf("cpu%d: manuf=%s, impl=%s, mask=%d (%dMHz)\n", m->id, manuf,
printf("cpu%d: manuf=%s, impl=%s, mask=%d (%d MHz)\n", m->id, manuf,
impl, m->arch.ver.mask, m->arch.clock_frequency / 1000000);
}
 
/branches/tracing/kernel/arch/sparc64/src/mm/as.c
76,7 → 76,7
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
sizeof(tsb_entry_t));
 
memsetb((uintptr_t) as->arch.itsb,
memsetb(as->arch.itsb,
(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
return 0;
/branches/tracing/kernel/arch/sparc64/src/mm/cache.S
27,10 → 27,8
*/
 
#include <arch/arch.h>
#include <arch/mm/cache_spec.h>
 
#define DCACHE_SIZE (16 * 1024)
#define DCACHE_LINE_SIZE 32
 
#define DCACHE_TAG_SHIFT 2
 
.register %g2, #scratch
/branches/tracing/kernel/arch/ia64/include/types.h
35,10 → 35,6
#ifndef KERN_ia64_TYPES_H_
#define KERN_ia64_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
69,14 → 65,29
typedef uint64_t unative_t;
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
#define PRIp "lx" /**< Format for uintptr_t. */
#define PRIs "lu" /**< Format for size_t. */
#define PRIc "lu" /**< Format for count_t. */
#define PRIi "lu" /**< Format for index_t. */
 
typedef int32_t inr_t;
typedef int32_t devno_t;
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
#define PRId32 "d" /**< Format for int32_t. */
#define PRId64 "ld" /**< Format for int64_t. */
#define PRIdn "d" /**< Format for native_t. */
 
#define PRIu8 "u" /**< Format for uint8_t. */
#define PRIu16 "u" /**< Format for uint16_t. */
#define PRIu32 "u" /**< Format for uint32_t. */
#define PRIu64 "lu" /**< Format for uint64_t. */
#define PRIun "u" /**< Format for unative_t. */
 
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */
#define PRIx32 "x" /**< Format for hexadecimal (u)uint32_t. */
#define PRIx64 "lx" /**< Format for hexadecimal (u)int64_t. */
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */
 
#endif
 
/** @}
/branches/tracing/kernel/arch/ia64/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/tracing/kernel/arch/ia64/include/barrier.h
45,9 → 45,33
#define read_barrier() memory_barrier()
#define write_barrier() memory_barrier()
 
#define srlz_i() asm volatile (";; srlz.i ;;\n" ::: "memory")
#define srlz_d() asm volatile (";; srlz.d\n" ::: "memory")
#define srlz_i() \
asm volatile (";; srlz.i ;;\n" ::: "memory")
#define srlz_d() \
asm volatile (";; srlz.d\n" ::: "memory")
 
#define fc_i(a) \
asm volatile ("fc.i %0\n" :: "r" ((a)) : "memory")
#define sync_i() \
asm volatile (";; sync.i\n" ::: "memory")
 
#define smc_coherence(a) \
{ \
fc_i((a)); \
sync_i(); \
srlz_i(); \
}
 
#define FC_INVAL_MIN 32
#define smc_coherence_block(a, l) \
{ \
unsigned long i; \
for (i = 0; i < (l); i += FC_INVAL_MIN) \
fc_i((void *)(a) + i); \
sync_i(); \
srlz_i(); \
}
 
#endif
 
/** @}
/branches/tracing/kernel/arch/ia64/src/mm/vhpt.c
81,7 → 81,7
 
void vhpt_invalidate_all()
{
memsetb((uintptr_t) vhpt_base, 1 << VHPT_WIDTH, 0);
memsetb(vhpt_base, 1 << VHPT_WIDTH, 0);
}
 
void vhpt_invalidate_asid(asid_t asid)
/branches/tracing/kernel/arch/ia64/src/drivers/ega.c
71,7 → 71,7
/*
* Clear the screen.
*/
_memsetw((uintptr_t) videoram, SCREEN, 0x0720);
_memsetw(videoram, SCREEN, 0x0720);
 
chardev_initialize("ega_out", &ega_console, &ega_ops);
stdout = &ega_console;
102,7 → 102,7
return;
 
memcpy((void *) videoram, (void *) (videoram + ROW * 2), (SCREEN - ROW) * 2);
_memsetw((uintptr_t) (videoram + (SCREEN - ROW) * 2), ROW, 0x0720);
_memsetw(videoram + (SCREEN - ROW) * 2, ROW, 0x0720);
ega_cursor = ega_cursor - ROW;
}
 
/branches/tracing/kernel/arch/arm32/include/types.h
42,10 → 42,6
# define ATTRIBUTE_PACKED
#endif
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed long int32_t;
68,15 → 64,29
typedef uint32_t unative_t;
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
#define PRIp "x" /**< Format for uintptr_t. */
#define PRIs "u" /**< Format for size_t. */
#define PRIc "u" /**< Format for count_t. */
#define PRIi "u" /**< Format for index_t. */
 
typedef int32_t inr_t;
typedef int32_t devno_t;
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
#define PRId32 "d" /**< Format for int32_t. */
#define PRId64 "lld" /**< Format for int64_t. */
#define PRIdn "d" /**< Format for native_t. */
 
#define PRIu8 "u" /**< Format for uint8_t. */
#define PRIu16 "u" /**< Format for uint16_t. */
#define PRIu32 "u" /**< Format for uint32_t. */
#define PRIu64 "llu" /**< Format for uint64_t. */
#define PRIun "u" /**< Format for unative_t. */
 
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */
#define PRIx32 "x" /**< Format for hexadecimal (u)uint32_t. */
#define PRIx64 "llx" /**< Format for hexadecimal (u)int64_t. */
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */
 
/** Page table entry.
*
* We have different structs for level 0 and level 1 page table entries.
/branches/tracing/kernel/arch/arm32/include/memstr.h
38,10 → 38,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/tracing/kernel/arch/arm32/include/barrier.h
46,6 → 46,9
#define read_barrier() asm volatile ("" ::: "memory")
#define write_barrier() asm volatile ("" ::: "memory")
 
#define smc_coherence(a)
#define smc_coherence_block(a, l)
 
#endif
 
/** @}
/branches/tracing/kernel/arch/arm32/Makefile.inc
37,9 → 37,9
 
KERNEL_LOAD_ADDRESS = 0x80200000
 
ifeq ($(MACHINE), gxemul_testarm)
# ifeq ($(MACHINE), gxemul_testarm)
DMACHINE = MACHINE_GXEMUL_TESTARM
endif
# endif
 
ATSIGN = %
 
90,7 → 90,7
arch/$(ARCH)/src/mm/tlb.c \
arch/$(ARCH)/src/mm/page_fault.c
 
ifeq ($(MACHINE), gxemul_testarm)
# ifeq ($(MACHINE), gxemul_testarm)
ARCH_SOURCES += arch/$(ARCH)/src/drivers/gxemul.c
endif
# endif
 
/branches/tracing/kernel/arch/arm32/src/exception.c
40,6 → 40,7
#include <interrupt.h>
#include <arch/machine.h>
#include <arch/mm/page_fault.h>
#include <arch/barrier.h>
#include <print.h>
#include <syscall/syscall.h>
#include <udebug/udebug.h>
210,7 → 211,7
*
* Addresses of handlers are stored in memory following exception vectors.
*/
static void install_handler (unsigned handler_addr, unsigned* vector)
static void install_handler(unsigned handler_addr, unsigned *vector)
{
/* relative address (related to exc. vector) of the word
* where handler's address is stored
220,6 → 221,7
/* make it LDR instruction and store at exception vector */
*vector = handler_address_ptr | LDR_OPCODE;
smc_coherence(*vector);
/* store handler's address */
*(vector + EXC_VECTORS) = handler_addr;
227,31 → 229,31
}
 
/** Low-level Reset Exception handler. */
static void reset_exception_entry()
static void reset_exception_entry(void)
{
PROCESS_EXCEPTION(EXC_RESET);
}
 
/** Low-level Software Interrupt Exception handler. */
static void swi_exception_entry()
static void swi_exception_entry(void)
{
PROCESS_EXCEPTION(EXC_SWI);
}
 
/** Low-level Undefined Instruction Exception handler. */
static void undef_instr_exception_entry()
static void undef_instr_exception_entry(void)
{
PROCESS_EXCEPTION(EXC_UNDEF_INSTR);
}
 
/** Low-level Fast Interrupt Exception handler. */
static void fiq_exception_entry()
static void fiq_exception_entry(void)
{
PROCESS_EXCEPTION(EXC_FIQ);
}
 
/** Low-level Prefetch Abort Exception handler. */
static void prefetch_abort_exception_entry()
static void prefetch_abort_exception_entry(void)
{
asm("sub lr, lr, #4");
PROCESS_EXCEPTION(EXC_PREFETCH_ABORT);
258,7 → 260,7
}
 
/** Low-level Data Abort Exception handler. */
static void data_abort_exception_entry()
static void data_abort_exception_entry(void)
{
asm("sub lr, lr, #8");
PROCESS_EXCEPTION(EXC_DATA_ABORT);
270,7 → 272,7
* because of the possible occurrence of a nested interrupt exception, which
* would overwrite (and thus spoil) the stack pointer.
*/
static void irq_exception_entry()
static void irq_exception_entry(void)
{
asm("sub lr, lr, #4");
setup_stack_and_save_regs();
/branches/tracing/kernel/arch/arm32/src/mm/page_fault.c
40,6 → 40,7
#include <genarch/mm/page_pt.h>
#include <arch.h>
#include <interrupt.h>
#include <print.h>
 
/** Returns value stored in fault status register.
*
/branches/tracing/kernel/arch/ppc32/include/types.h
35,10 → 35,6
#ifndef KERN_ppc32_TYPES_H_
#define KERN_ppc32_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
61,14 → 57,31
typedef uint32_t unative_t;
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
/**< Formats for uintptr_t, size_t, count_t and index_t */
#define PRIp "x"
#define PRIs "u"
#define PRIc "u"
#define PRIi "u"
 
typedef int32_t inr_t;
typedef int32_t devno_t;
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */
#define PRId8 "d"
#define PRId16 "d"
#define PRId32 "d"
#define PRId64 "lld"
#define PRIdn "d"
 
#define PRIu8 "u"
#define PRIu16 "u"
#define PRIu32 "u"
#define PRIu64 "llu"
#define PRIun "u"
 
#define PRIx8 "x"
#define PRIx16 "x"
#define PRIx32 "x"
#define PRIx64 "llx"
#define PRIxn "x"
 
/** Page Table Entry. */
typedef struct {
unsigned p : 1; /**< Present bit. */
/branches/tracing/kernel/arch/ppc32/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/tracing/kernel/arch/ppc32/include/boot/boot.h
38,7 → 38,7
#define BOOT_OFFSET 0x8000
 
/* Temporary stack size for boot process */
#define TEMP_STACK_SIZE 0x100
#define TEMP_STACK_SIZE 0x1000
 
#define TASKMAP_MAX_RECORDS 32
#define MEMMAP_MAX_RECORDS 32
/branches/tracing/kernel/arch/ppc32/include/mm/tlb.h
36,6 → 36,8
#define KERN_ppc32_TLB_H_
 
#include <arch/interrupt.h>
#include <arch/types.h>
#include <typedefs.h>
 
typedef struct {
unsigned v : 1; /**< Valid */
/branches/tracing/kernel/arch/ppc32/include/barrier.h
42,6 → 42,43
#define read_barrier() asm volatile ("sync" ::: "memory")
#define write_barrier() asm volatile ("eieio" ::: "memory")
 
/*
* The IMB sequence used here is valid for all possible cache models
* on a uniprocessor system. SMP might require a different sequence.
* See PowerPC Programming Environment for 32-Bit Microprocessors,
* chapter 5.1.5.2
*/
 
static inline void smc_coherence(void *addr)
{
asm volatile (
"dcbst 0, %0\n"
"sync\n"
"icbi 0, %0\n"
"isync\n"
:: "r" (addr)
);
}
 
#define COHERENCE_INVAL_MIN 4
 
static inline void smc_coherence_block(void *addr, unsigned long len)
{
unsigned long i;
 
for (i = 0; i < len; i += COHERENCE_INVAL_MIN) {
asm volatile ("dcbst 0, %0\n" :: "r" (addr + i));
}
 
asm volatile ("sync");
 
for (i = 0; i < len; i += COHERENCE_INVAL_MIN) {
asm volatile ("icbi 0, %0\n" :: "r" (addr + i));
}
 
asm volatile ("isync");
}
 
#endif
 
/** @}
/branches/tracing/kernel/arch/ppc32/include/drivers/cuda.h
36,6 → 36,7
#define KERN_ppc32_CUDA_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
extern void cuda_init(devno_t devno, uintptr_t base, size_t size);
extern int cuda_get_scancode(void);
/branches/tracing/kernel/arch/ppc32/src/mm/page.c
48,7 → 48,7
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
panic("Unable to map physical memory %p (%d bytes)", physaddr, size)
panic("Unable to map physical memory %p (%" PRIs " bytes)", physaddr, size)
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
/branches/tracing/kernel/arch/ppc32/src/interrupt.c
80,7 → 80,7
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
printf("cpu%u: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
/branches/tracing/kernel/arch/ia32xen/include/types.h
61,14 → 61,6
typedef uint32_t unative_t;
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
typedef int32_t inr_t;
typedef int32_t devno_t;
 
/** Page Table Entry. */
typedef struct {
unsigned present : 1;
/branches/tracing/kernel/arch/ia32xen/src/ia32xen.c
69,7 → 69,7
void arch_pre_main(void)
{
pte_t pte;
memsetb((uintptr_t) &pte, sizeof(pte), 0);
memsetb(&pte, sizeof(pte), 0);
pte.present = 1;
pte.writeable = 1;
103,7 → 103,7
uintptr_t tpa = PFN2ADDR(meminfo.start + meminfo.reserved);
uintptr_t tva = PA2KA(tpa);
memsetb(tva, PAGE_SIZE, 0);
memsetb((void *) tva, PAGE_SIZE, 0);
pte_t *tptl3 = (pte_t *) PA2KA(GET_PTL1_ADDRESS(start_info.ptl0, PTL0_INDEX(tva)));
SET_FRAME_ADDRESS(tptl3, PTL3_INDEX(tva), 0);
/branches/tracing/kernel/arch/ia32xen/src/pm.c
98,7 → 98,7
 
void tss_initialize(tss_t *t)
{
memsetb((uintptr_t) t, sizeof(struct tss), 0);
memsetb(t, sizeof(struct tss), 0);
}
 
static void trap(void)
/branches/tracing/kernel/arch/ia32xen/src/smp/smp.c
142,11 → 142,11
/*
* Prepare new GDT for CPU in question.
*/
if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS*sizeof(struct descriptor), FRAME_ATOMIC)))
if (!(gdt_new = (struct descriptor *) malloc(GDT_ITEMS * sizeof(struct descriptor), FRAME_ATOMIC)))
panic("couldn't allocate memory for GDT\n");
 
memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor));
memsetb((uintptr_t)(&gdt_new[TSS_DES]), sizeof(struct descriptor), 0);
memsetb(&gdt_new[TSS_DES], sizeof(struct descriptor), 0);
gdtr.base = (uintptr_t) gdt_new;
 
if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) {
/branches/tracing/kernel/arch/amd64/include/types.h
35,10 → 35,6
#ifndef KERN_amd64_TYPES_H_
#define KERN_amd64_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
61,14 → 57,31
typedef uint64_t unative_t;
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
/**< Formats for uintptr_t, size_t, count_t and index_t */
#define PRIp "llx"
#define PRIs "llu"
#define PRIc "llu"
#define PRIi "llu"
 
typedef int32_t inr_t;
typedef int32_t devno_t;
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */
#define PRId8 "d"
#define PRId16 "d"
#define PRId32 "d"
#define PRId64 "lld"
#define PRIdn "lld"
 
#define PRIu8 "u"
#define PRIu16 "u"
#define PRIu32 "u"
#define PRIu64 "llu"
#define PRIun "llu"
 
#define PRIx8 "x"
#define PRIx16 "x"
#define PRIx32 "x"
#define PRIx64 "llx"
#define PRIxn "llx"
 
/** Page Table Entry. */
typedef struct {
unsigned present : 1;
/branches/tracing/kernel/arch/amd64/include/memstr.h
35,110 → 35,13
#ifndef KERN_amd64_MEMSTR_H_
#define KERN_amd64_MEMSTR_H_
 
/** Copy memory
*
* Copy a given number of bytes (3rd argument)
* from the memory location defined by 2nd argument
* to the memory location defined by 1st argument.
* The memory areas cannot overlap.
*
* @param dst Destination
* @param src Source
* @param cnt Number of bytes
* @return Destination
*/
static inline void * memcpy(void * dst, const void * src, size_t cnt)
{
unative_t d0, d1, d2;
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
asm volatile(
"rep movsq\n\t"
"movq %4, %%rcx\n\t"
"andq $7, %%rcx\n\t"
"jz 1f\n\t"
"rep movsb\n\t"
"1:\n"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" ((unative_t)(cnt / 8)), "g" ((unative_t)cnt), "1" ((unative_t) dst), "2" ((unative_t) src)
: "memory");
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
return dst;
}
extern int memcmp(const void *a, const void *b, size_t cnt);
 
 
/** Compare memory regions for equality
*
* Compare a given number of bytes (3rd argument)
* at memory locations defined by 1st and 2nd argument
* for equality. If bytes are equal function returns 0.
*
* @param src Region 1
* @param dst Region 2
* @param cnt Number of bytes
* @return Zero if bytes are equal, non-zero otherwise
*/
static inline int memcmp(const void * src, const void * dst, size_t cnt)
{
unative_t d0, d1, d2;
unative_t ret;
asm (
"repe cmpsb\n\t"
"je 1f\n\t"
"movq %3, %0\n\t"
"addq $1, %0\n\t"
"1:\n"
: "=a" (ret), "=&S" (d0), "=&D" (d1), "=&c" (d2)
: "0" (0), "1" (src), "2" (dst), "3" ((unative_t)cnt)
);
return ret;
}
 
/** Fill memory with words
* Fill a given number of words (2nd argument)
* at memory defined by 1st argument with the
* word value defined by 3rd argument.
*
* @param dst Destination
* @param cnt Number of words
* @param x Value to fill
*/
static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x)
{
unative_t d0, d1;
asm volatile (
"rep stosw\n\t"
: "=&D" (d0), "=&c" (d1), "=&a" (x)
: "0" (dst), "1" ((unative_t)cnt), "2" (x)
: "memory"
);
 
}
 
/** Fill memory with bytes
* Fill a given number of bytes (2nd argument)
* at memory defined by 1st argument with the
* word value defined by 3rd argument.
*
* @param dst Destination
* @param cnt Number of bytes
* @param x Value to fill
*/
static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x)
{
unative_t d0, d1;
asm volatile (
"rep stosb\n\t"
: "=&D" (d0), "=&c" (d1), "=&a" (x)
: "0" (dst), "1" ((unative_t)cnt), "2" (x)
: "memory"
);
 
}
 
#endif
 
/** @}
/branches/tracing/kernel/arch/amd64/include/atomic.h
41,17 → 41,17
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock incq %0\n" : "=m" (val->count));
asm volatile ("lock incq %0\n" : "+m" (val->count));
#else
asm volatile ("incq %0\n" : "=m" (val->count));
asm volatile ("incq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock decq %0\n" : "=m" (val->count));
asm volatile ("lock decq %0\n" : "+m" (val->count));
#else
asm volatile ("decq %0\n" : "=m" (val->count));
asm volatile ("decq %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
61,7 → 61,7
 
asm volatile (
"lock xaddq %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
 
return r;
73,14 → 73,14
asm volatile (
"lock xaddq %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
return r;
}
 
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
 
static inline uint64_t test_and_set(atomic_t *val) {
uint64_t v;
88,7 → 88,7
asm volatile (
"movq $1, %0\n"
"xchgq %0, %1\n"
: "=r" (v),"=m" (val->count)
: "=r" (v), "+m" (val->count)
);
return v;
102,20 → 102,20
 
preemption_disable();
asm volatile (
"0:;"
"0:\n"
#ifdef CONFIG_HT
"pause;"
"pause\n"
#endif
"mov %0, %1;"
"testq %1, %1;"
"jnz 0b;" /* Lightweight looping on locked spinlock */
"mov %0, %1\n"
"testq %1, %1\n"
"jnz 0b\n" /* Lightweight looping on locked spinlock */
"incq %1;" /* now use the atomic operation */
"xchgq %0, %1;"
"testq %1, %1;"
"jnz 0b;"
: "=m"(val->count),"=r"(tmp)
);
"incq %1\n" /* now use the atomic operation */
"xchgq %0, %1\n"
"testq %1, %1\n"
"jnz 0b\n"
: "+m" (val->count), "=r"(tmp)
);
/*
* Prevent critical section code from bleeding out this way up.
*/
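The rewritten assembly loop above implements a test-and-test-and-set spin lock. An equivalent C-level sketch (illustrative only), using test_and_set() from this header and atomic_get() from the generic <atomic.h>; the real routine also disables preemption first:
 
static inline void atomic_lock_sketch(atomic_t *val)
{
	do {
		/* Lightweight read-only spinning while the lock is held. */
		while (atomic_get(val) != 0)
			;
		/* Atomically attempt to grab the lock; retry if it was taken
		 * again in the meantime. */
	} while (test_and_set(val) != 0);
}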
/branches/tracing/kernel/arch/amd64/Makefile.inc
35,6 → 35,7
TARGET = amd64-linux-gnu
TOOLCHAIN_DIR = /usr/local/amd64
 
FPU_NO_CFLAGS = -mno-sse -mno-sse2
CMN1 = -m64 -mcmodel=kernel -mno-red-zone -fno-unwind-tables
GCC_CFLAGS += $(CMN1)
ICC_CFLAGS += $(CMN1)
/branches/tracing/kernel/arch/amd64/src/pm.c
155,7 → 155,7
 
void tss_initialize(tss_t *t)
{
memsetb((uintptr_t) t, sizeof(tss_t), 0);
memsetb(t, sizeof(tss_t), 0);
}
 
/*
239,7 → 239,7
preemption_disable();
ipl_t ipl = interrupts_disable();
memsetb((uintptr_t) idt, sizeof(idt), 0);
memsetb(idt, sizeof(idt), 0);
idtr_load(&idtr);
interrupts_restore(ipl);
/branches/tracing/kernel/arch/amd64/src/asm_utils.S
65,6 → 65,8
.global get_cycle
.global read_efer_flag
.global set_efer_flag
.global memsetb
.global memsetw
.global memcpy
.global memcpy_from_uspace
.global memcpy_to_uspace
71,6 → 73,14
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address
 
# Wrapper for generic memsetb
memsetb:
jmp _memsetb
 
# Wrapper for generic memsetw
memsetw:
jmp _memsetw
 
#define MEMCPY_DST %rdi
#define MEMCPY_SRC %rsi
#define MEMCPY_SIZE %rdx
/branches/tracing/kernel/arch/amd64/src/proc/thread.c
46,7 → 46,7
* Kernel RSP can be precalculated at thread creation time.
*/
t->arch.syscall_rsp[SYSCALL_KSTACK_RSP] =
(uintptr_t)&t->kstack[PAGE_SIZE - sizeof(uint64_t)];
(uintptr_t) &t->kstack[PAGE_SIZE - sizeof(uint64_t)];
}
 
/** @}
/branches/tracing/kernel/arch/amd64/src/debugger.c
106,25 → 106,33
{
unsigned int i;
char *symbol;
 
#ifdef __32_BITS__
printf("# Count Address In symbol\n");
printf("-- ----- ---------- ---------\n");
#endif
 
#ifdef __64_BITS__
printf("# Count Address In symbol\n");
printf("-- ----- ------------------ ---------\n");
#endif
if (sizeof(void *) == 4) {
printf("# Count Address In symbol\n");
printf("-- ----- ---------- ---------\n");
} else {
printf("# Count Address In symbol\n");
printf("-- ----- ------------------ ---------\n");
}
for (i = 0; i < BKPOINTS_MAX; i++)
if (breakpoints[i].address) {
symbol = get_symtab_entry(breakpoints[i].address);
if (sizeof(void *) == 4)
printf("%-2u %-5d %#10zx %s\n", i, breakpoints[i].counter,
breakpoints[i].address, symbol);
else
printf("%-2u %-5d %#18zx %s\n", i, breakpoints[i].counter,
breakpoints[i].address, symbol);
 
#ifdef __32_BITS__
printf("%-2u %-5d %#10zx %s\n", i,
breakpoints[i].counter, breakpoints[i].address,
symbol);
#endif
 
#ifdef __64_BITS__
printf("%-2u %-5d %#18zx %s\n", i,
breakpoints[i].counter, breakpoints[i].address,
symbol);
#endif
 
}
return 1;
}
162,19 → 170,23
if ((flags & BKPOINT_INSTR)) {
;
} else {
if (sizeof(int) == 4)
dr7 |= ((unative_t) 0x3) << (18 + 4*curidx);
else /* 8 */
dr7 |= ((unative_t) 0x2) << (18 + 4*curidx);
#ifdef __32_BITS__
dr7 |= ((unative_t) 0x3) << (18 + 4 * curidx);
#endif
 
#ifdef __64_BITS__
dr7 |= ((unative_t) 0x2) << (18 + 4 * curidx);
#endif
if ((flags & BKPOINT_WRITE))
dr7 |= ((unative_t) 0x1) << (16 + 4*curidx);
dr7 |= ((unative_t) 0x1) << (16 + 4 * curidx);
else if ((flags & BKPOINT_READ_WRITE))
dr7 |= ((unative_t) 0x3) << (16 + 4*curidx);
dr7 |= ((unative_t) 0x3) << (16 + 4 * curidx);
}
 
/* Enable global breakpoint */
dr7 |= 0x2 << (curidx*2);
dr7 |= 0x2 << (curidx * 2);
 
write_dr7(dr7);
246,15 → 258,15
if ((breakpoints[slot].flags & BKPOINT_CHECK_ZERO)) {
if (*((unative_t *) breakpoints[slot].address) != 0)
return;
printf("**** Found ZERO on address %lx (slot %d) ****\n",
breakpoints[slot].address, slot);
printf("*** Found ZERO on address %lx (slot %d) ***\n",
breakpoints[slot].address, slot);
} else {
printf("Data watchpoint - new data: %lx\n",
*((unative_t *) breakpoints[slot].address));
*((unative_t *) breakpoints[slot].address));
}
}
printf("Reached breakpoint %d:%lx(%s)\n", slot, getip(istate),
get_symtab_entry(getip(istate)));
get_symtab_entry(getip(istate)));
printf("***Type 'exit' to exit kconsole.\n");
atomic_set(&haltstate,1);
kconsole((void *) "debug");
359,7 → 371,9
}
 
#ifdef CONFIG_SMP
static void debug_ipi(int n __attribute__((unused)), istate_t *istate __attribute__((unused)))
static void
debug_ipi(int n __attribute__((unused)),
istate_t *istate __attribute__((unused)))
{
int i;
 
375,7 → 389,7
{
int i;
 
for (i=0; i<BKPOINTS_MAX; i++)
for (i = 0; i < BKPOINTS_MAX; i++)
breakpoints[i].address = NULL;
cmd_initialize(&bkpts_info);
396,11 → 410,9
panic("could not register command %s\n", addwatchp_info.name);
#endif
exc_register(VECTOR_DEBUG, "debugger",
debug_exception);
exc_register(VECTOR_DEBUG, "debugger", debug_exception);
#ifdef CONFIG_SMP
exc_register(VECTOR_DEBUG_IPI, "debugger_smp",
debug_ipi);
exc_register(VECTOR_DEBUG_IPI, "debugger_smp", debug_ipi);
#endif
}
 
/branches/tracing/kernel/arch/ppc64/include/exception.h
82,6 → 82,7
{
istate->pc = retaddr;
}
 
/** Return true if exception happened while in userspace */
#include <panic.h>
static inline int istate_from_uspace(istate_t *istate)
89,6 → 90,7
panic("istate_from_uspace not yet implemented");
return 0;
}
 
static inline unative_t istate_get_pc(istate_t *istate)
{
return istate->pc;
/branches/tracing/kernel/arch/ppc64/include/types.h
35,10 → 35,6
#ifndef KERN_ppc64_TYPES_H_
#define KERN_ppc64_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
61,14 → 57,6
typedef uint64_t unative_t;
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
typedef int32_t inr_t;
typedef int32_t devno_t;
 
/** Page Table Entry. */
typedef struct {
unsigned p : 1; /**< Present bit. */
/branches/tracing/kernel/arch/ppc64/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/tracing/kernel/arch/ppc64/include/barrier.h
38,10 → 38,13
#define CS_ENTER_BARRIER() asm volatile ("" ::: "memory")
#define CS_LEAVE_BARRIER() asm volatile ("" ::: "memory")
 
#define memory_barrier() asm volatile ("sync" ::: "memory")
#define read_barrier() asm volatile ("sync" ::: "memory")
#define write_barrier() asm volatile ("eieio" ::: "memory")
#define memory_barrier() asm volatile ("sync" ::: "memory")
#define read_barrier() asm volatile ("sync" ::: "memory")
#define write_barrier() asm volatile ("eieio" ::: "memory")
 
#define smc_coherence(a)
#define smc_coherence_block(a, l)
 
#endif
 
/** @}
/branches/tracing/kernel/arch/ppc64/src/cpu/cpu.c
53,7 → 53,7
 
void cpu_print_report(cpu_t *m)
{
printf("cpu%d: version=%d, revision=%d\n", m->id, m->arch.version, m->arch.revision);
printf("cpu%u: version=%d, revision=%d\n", m->id, m->arch.version, m->arch.revision);
}
 
/** @}
/branches/tracing/kernel/arch/ppc64/src/mm/page.c
252,7 → 252,7
 
void pht_init(void)
{
memsetb((uintptr_t) phte, 1 << PHT_BITS, 0);
memsetb(phte, 1 << PHT_BITS, 0);
}
 
 
289,7 → 289,7
uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
panic("Unable to map physical memory %p (%d bytes)", physaddr, size)
panic("Unable to map physical memory %p (%" PRIs " bytes)", physaddr, size)
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
/branches/tracing/kernel/arch/ppc64/src/interrupt.c
80,7 → 80,7
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
printf("cpu%u: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
/branches/tracing/kernel/arch/mips32/include/types.h
35,10 → 35,6
#ifndef KERN_mips32_TYPES_H_
#define KERN_mips32_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed long int32_t;
61,14 → 57,29
typedef uint32_t unative_t;
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
#define PRIp "x" /**< Format for uintptr_t. */
#define PRIs "u" /**< Format for size_t. */
#define PRIc "u" /**< Format for count_t. */
#define PRIi "u" /**< Format for index_t. */
 
typedef int32_t inr_t;
typedef int32_t devno_t;
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
#define PRId32 "ld" /**< Format for int32_t. */
#define PRId64 "lld" /**< Format for int64_t. */
#define PRIdn "d" /**< Format for native_t. */
 
#define PRIu8 "u" /**< Format for uint8_t. */
#define PRIu16 "u" /**< Format for uint16_t. */
#define PRIu32 "u" /**< Format for uint32_t. */
#define PRIu64 "llu" /**< Format for uint64_t. */
#define PRIun "u" /**< Format for unative_t. */
 
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */
#define PRIx32 "x" /**< Format for hexadecimal (u)uint32_t. */
#define PRIx64 "llx" /**< Format for hexadecimal (u)int64_t. */
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */
 
/** Page Table Entry. */
typedef struct {
unsigned g : 1; /**< Global bit. */
/branches/tracing/kernel/arch/mips32/include/memstr.h
37,10 → 37,10
 
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x);
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x);
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt);
extern int memcmp(const void *a, const void *b, size_t cnt);
 
#endif
 
/branches/tracing/kernel/arch/mips32/include/atomic.h
58,13 → 58,13
asm volatile (
"1:\n"
" ll %0, %1\n"
" addiu %0, %0, %3\n" /* same as addi, but never traps on overflow */
" addu %0, %0, %3\n" /* same as addi, but never traps on overflow */
" move %2, %0\n"
" sc %0, %1\n"
" beq %0, %4, 1b\n" /* if the atomic operation failed, try again */
" nop\n"
: "=&r" (tmp), "=m" (val->count), "=&r" (v)
: "i" (i), "i" (0)
: "=&r" (tmp), "+m" (val->count), "=&r" (v)
: "r" (i), "i" (0)
);
 
return v;
/branches/tracing/kernel/arch/mips32/include/barrier.h
45,6 → 45,9
#define read_barrier() asm volatile ("" ::: "memory")
#define write_barrier() asm volatile ("" ::: "memory")
 
#define smc_coherence(a)
#define smc_coherence_block(a, l)
 
#endif
 
/** @}
/branches/tracing/kernel/arch/mips32/src/exception.c
167,7 → 167,7
* Spurious interrupt.
*/
#ifdef CONFIG_DEBUG
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, i);
printf("cpu%u: spurious interrupt (inum=%d)\n", CPU->id, i);
#endif
}
}
/branches/tracing/kernel/arch/mips32/src/mips32.c
50,6 → 50,7
#include <arch/interrupt.h>
#include <arch/drivers/arc.h>
#include <console/chardev.h>
#include <arch/barrier.h>
#include <arch/debugger.h>
#include <genarch/fb/fb.h>
#include <genarch/fb/visuals.h>
100,14 → 101,18
 
/* Copy the exception vectors to the right places */
memcpy(TLB_EXC, (char *) tlb_refill_entry, EXCEPTION_JUMP_SIZE);
smc_coherence_block(TLB_EXC, EXCEPTION_JUMP_SIZE);
memcpy(NORM_EXC, (char *) exception_entry, EXCEPTION_JUMP_SIZE);
smc_coherence_block(NORM_EXC, EXCEPTION_JUMP_SIZE);
memcpy(CACHE_EXC, (char *) cache_error_entry, EXCEPTION_JUMP_SIZE);
smc_coherence_block(CACHE_EXC, EXCEPTION_JUMP_SIZE);
/*
* Switch to BEV normal level so that exception vectors point to the kernel.
* Clear the error level.
* Switch to BEV normal level so that exception vectors point to the
* kernel. Clear the error level.
*/
cp0_status_write(cp0_status_read() & ~(cp0_status_bev_bootstrap_bit|cp0_status_erl_error_bit));
cp0_status_write(cp0_status_read() &
~(cp0_status_bev_bootstrap_bit | cp0_status_erl_error_bit));
 
/*
* Mask all interrupts
122,7 → 127,8
interrupt_init();
console_init(device_assign_devno());
#ifdef CONFIG_FB
fb_init(0x12000000, 640, 480, 1920, VISUAL_RGB_8_8_8); // gxemul framebuffer
/* GXemul framebuffer */
fb_init(0x12000000, 640, 480, 1920, VISUAL_RGB_8_8_8);
#endif
sysinfo_set_item_val("machine." STRING(MACHINE), NULL, 1);
}
143,13 → 149,14
{
/* EXL = 1, UM = 1, IE = 1 */
cp0_status_write(cp0_status_read() | (cp0_status_exl_exception_bit |
cp0_status_um_bit | cp0_status_ie_enabled_bit));
cp0_status_um_bit | cp0_status_ie_enabled_bit));
cp0_epc_write((uintptr_t) kernel_uarg->uspace_entry);
userspace_asm(((uintptr_t) kernel_uarg->uspace_stack + PAGE_SIZE),
(uintptr_t) kernel_uarg->uspace_uarg,
(uintptr_t) kernel_uarg->uspace_entry);
(uintptr_t) kernel_uarg->uspace_uarg,
(uintptr_t) kernel_uarg->uspace_entry);
while (1);
while (1)
;
}
 
/** Perform mips32 specific tasks needed before the new task is run. */
160,7 → 167,8
/** Perform mips32 specific tasks needed before the new thread is scheduled. */
void before_thread_runs_arch(void)
{
supervisor_sp = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
supervisor_sp = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE -
SP_DELTA];
}
 
void after_thread_ran_arch(void)
182,7 → 190,8
if (!arc_reboot())
___halt();
while (1);
while (1)
;
}
 
/** @}
/branches/tracing/kernel/arch/mips32/src/debugger.c
33,6 → 33,7
*/
 
#include <arch/debugger.h>
#include <arch/barrier.h>
#include <memstr.h>
#include <console/kconsole.h>
#include <console/cmd.h>
72,7 → 73,8
};
static cmd_info_t addbkpt_info = {
.name = "addbkpt",
.description = "addbkpt <&symbol> - new bkpoint. Break on J/Branch insts unsupported.",
.description = "addbkpt <&symbol> - new bkpoint. Break on J/Branch "
"insts unsupported.",
.func = cmd_add_breakpoint,
.argc = 1,
.argv = &add_argv
84,7 → 86,8
};
static cmd_info_t addbkpte_info = {
.name = "addbkpte",
.description = "addebkpte <&symbol> <&func> - new bkpoint. Call func(or Nothing if 0).",
.description = "addebkpte <&symbol> <&func> - new bkpoint. Call "
"func(or Nothing if 0).",
.func = cmd_add_breakpoint,
.argc = 2,
.argv = adde_argv
93,7 → 96,7
static struct {
uint32_t andmask;
uint32_t value;
}jmpinstr[] = {
} jmpinstr[] = {
{0xf3ff0000, 0x41000000}, /* BCzF */
{0xf3ff0000, 0x41020000}, /* BCzFL */
{0xf3ff0000, 0x41010000}, /* BCzT */
117,7 → 120,7
{0xfc000000, 0x08000000}, /* J */
{0xfc000000, 0x0c000000}, /* JAL */
{0xfc1f07ff, 0x00000009}, /* JALR */
{0,0} /* EndOfTable */
{0, 0} /* EndOfTable */
};
 
/** Test, if the given instruction is a jump or branch instruction
129,7 → 132,7
{
int i;
 
for (i=0;jmpinstr[i].andmask;i++) {
for (i = 0; jmpinstr[i].andmask; i++) {
if ((instr & jmpinstr[i].andmask) == jmpinstr[i].value)
return true;
}
152,14 → 155,16
spinlock_lock(&bkpoint_lock);
 
/* Check, that the breakpoints do not conflict */
for (i=0; i<BKPOINTS_MAX; i++) {
for (i = 0; i < BKPOINTS_MAX; i++) {
if (breakpoints[i].address == (uintptr_t)argv->intval) {
printf("Duplicate breakpoint %d.\n", i);
spinlock_unlock(&bkpoints_lock);
return 0;
} else if (breakpoints[i].address == (uintptr_t)argv->intval + sizeof(unative_t) || \
breakpoints[i].address == (uintptr_t)argv->intval - sizeof(unative_t)) {
printf("Adjacent breakpoints not supported, conflict with %d.\n", i);
} else if (breakpoints[i].address == (uintptr_t)argv->intval +
sizeof(unative_t) || breakpoints[i].address ==
(uintptr_t)argv->intval - sizeof(unative_t)) {
printf("Adjacent breakpoints not supported, conflict "
"with %d.\n", i);
spinlock_unlock(&bkpoints_lock);
return 0;
}
166,7 → 171,7
}
 
for (i=0; i<BKPOINTS_MAX; i++)
for (i = 0; i < BKPOINTS_MAX; i++)
if (!breakpoints[i].address) {
cur = &breakpoints[i];
break;
185,7 → 190,7
cur->flags = 0;
} else { /* We are add extended */
cur->flags = BKPOINT_FUNCCALL;
cur->bkfunc = (void (*)(void *, istate_t *)) argv[1].intval;
cur->bkfunc = (void (*)(void *, istate_t *)) argv[1].intval;
}
if (is_jump(cur->instruction))
cur->flags |= BKPOINT_ONESHOT;
193,6 → 198,7
 
/* Set breakpoint */
*((unative_t *)cur->address) = 0x0d;
smc_coherence(cur->address);
 
spinlock_unlock(&bkpoint_lock);
interrupts_restore(ipl);
200,8 → 206,6
return 1;
}
 
 
 
/** Remove breakpoint from table */
int cmd_del_breakpoint(cmd_arg_t *argv)
{
229,7 → 233,9
return 0;
}
((uint32_t *)cur->address)[0] = cur->instruction;
smc_coherence(((uint32_t *)cur->address)[0]);
((uint32_t *)cur->address)[1] = cur->nextinstruction;
smc_coherence(((uint32_t *)cur->address)[1]);
 
cur->address = NULL;
 
252,11 → 258,11
symbol = get_symtab_entry(breakpoints[i].address);
printf("%-2u %-5d %#10zx %-6s %-7s %-8s %s\n", i,
breakpoints[i].counter, breakpoints[i].address,
((breakpoints[i].flags & BKPOINT_INPROG) ? "true" : "false"),
((breakpoints[i].flags & BKPOINT_ONESHOT) ? "true" : "false"),
((breakpoints[i].flags & BKPOINT_FUNCCALL) ? "true" : "false"),
symbol);
breakpoints[i].counter, breakpoints[i].address,
((breakpoints[i].flags & BKPOINT_INPROG) ? "true" :
"false"), ((breakpoints[i].flags & BKPOINT_ONESHOT)
? "true" : "false"), ((breakpoints[i].flags &
BKPOINT_FUNCCALL) ? "true" : "false"), symbol);
}
return 1;
}
266,7 → 272,7
{
int i;
 
for (i=0; i<BKPOINTS_MAX; i++)
for (i = 0; i < BKPOINTS_MAX; i++)
breakpoints[i].address = NULL;
cmd_initialize(&bkpts_info);
305,16 → 311,16
panic("Breakpoint in branch delay slot not supported.\n");
 
spinlock_lock(&bkpoint_lock);
for (i=0; i<BKPOINTS_MAX; i++) {
for (i = 0; i < BKPOINTS_MAX; i++) {
/* Normal breakpoint */
if (fireaddr == breakpoints[i].address \
&& !(breakpoints[i].flags & BKPOINT_REINST)) {
if (fireaddr == breakpoints[i].address &&
!(breakpoints[i].flags & BKPOINT_REINST)) {
cur = &breakpoints[i];
break;
}
/* Reinst only breakpoint */
if ((breakpoints[i].flags & BKPOINT_REINST) \
&& (fireaddr ==breakpoints[i].address+sizeof(unative_t))) {
if ((breakpoints[i].flags & BKPOINT_REINST) &&
(fireaddr == breakpoints[i].address + sizeof(unative_t))) {
cur = &breakpoints[i];
break;
}
323,8 → 329,10
if (cur->flags & BKPOINT_REINST) {
/* Set breakpoint on first instruction */
((uint32_t *)cur->address)[0] = 0x0d;
smc_coherence(((uint32_t *)cur->address)[0]);
/* Return back the second */
((uint32_t *)cur->address)[1] = cur->nextinstruction;
smc_coherence(((uint32_t *)cur->address)[1]);
cur->flags &= ~BKPOINT_REINST;
spinlock_unlock(&bkpoint_lock);
return;
333,11 → 341,12
printf("Warning: breakpoint recursion\n");
if (!(cur->flags & BKPOINT_FUNCCALL))
printf("***Breakpoint %d: %p in %s.\n", i,
fireaddr, get_symtab_entry(istate->epc));
printf("***Breakpoint %d: %p in %s.\n", i, fireaddr,
get_symtab_entry(istate->epc));
 
/* Return first instruction back */
((uint32_t *)cur->address)[0] = cur->instruction;
smc_coherence(cur->address);
 
if (! (cur->flags & BKPOINT_ONESHOT)) {
/* Set Breakpoint on next instruction */
/branches/tracing/kernel/arch/ia32/include/types.h
35,10 → 35,6
#ifndef KERN_ia32_TYPES_H_
#define KERN_ia32_TYPES_H_
 
#define NULL 0
#define false 0
#define true 1
 
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed long int32_t;
61,14 → 57,29
typedef uint32_t unative_t;
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
#define PRIp "x" /**< Format for uintptr_t. */
#define PRIs "u" /**< Format for size_t. */
#define PRIc "u" /**< Format for count_t. */
#define PRIi "u" /**< Format for index_t. */
 
typedef int32_t inr_t;
typedef int32_t devno_t;
#define PRId8 "d" /**< Format for int8_t. */
#define PRId16 "d" /**< Format for int16_t. */
#define PRId32 "d" /**< Format for int32_t. */
#define PRId64 "lld" /**< Format for int64_t. */
#define PRIdn "d" /**< Format for native_t. */
 
#define PRIu8 "u" /**< Format for uint8_t. */
#define PRIu16 "u" /**< Format for uint16_t. */
#define PRIu32 "u" /**< Format for uint32_t. */
#define PRIu64 "llu" /**< Format for uint64_t. */
#define PRIun "u" /**< Format for unative_t. */
 
#define PRIx8 "x" /**< Format for hexadecimal (u)int8_t. */
#define PRIx16 "x" /**< Format for hexadecimal (u)int16_t. */
#define PRIx32 "x" /**< Format for hexadecimal (u)uint32_t. */
#define PRIx64 "llx" /**< Format for hexadecimal (u)int64_t. */
#define PRIxn "x" /**< Format for hexadecimal (u)native_t. */
 
/** Page Table Entry. */
typedef struct {
unsigned present : 1;
/branches/tracing/kernel/arch/ia32/include/memstr.h
35,116 → 35,13
#ifndef KERN_ia32_MEMSTR_H_
#define KERN_ia32_MEMSTR_H_
 
/** Copy memory
*
* Copy a given number of bytes (3rd argument)
* from the memory location defined by 2nd argument
* to the memory location defined by 1st argument.
* The memory areas cannot overlap.
*
* @param dst Destination
* @param src Source
* @param cnt Number of bytes
* @return Destination
*/
static inline void * memcpy(void * dst, const void * src, size_t cnt)
{
unative_t d0, d1, d2;
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt))
 
asm volatile(
/* copy all full dwords */
"rep movsl\n\t"
/* load count again */
"movl %4, %%ecx\n\t"
/* ecx = ecx mod 4 */
"andl $3, %%ecx\n\t"
/* are there last <=3 bytes? */
"jz 1f\n\t"
/* copy last <=3 bytes */
"rep movsb\n\t"
/* exit from asm block */
"1:\n"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" ((unative_t) (cnt / 4)), "g" ((unative_t) cnt), "1" ((unative_t) dst), "2" ((unative_t) src)
: "memory");
extern void memsetw(void *dst, size_t cnt, uint16_t x);
extern void memsetb(void *dst, size_t cnt, uint8_t x);
 
return dst;
}
extern int memcmp(const void *a, const void *b, size_t cnt);
 
 
/** Compare memory regions for equality
*
* Compare a given number of bytes (3rd argument)
* at memory locations defined by 1st and 2nd argument
* for equality. If bytes are equal function returns 0.
*
* @param src Region 1
* @param dst Region 2
* @param cnt Number of bytes
* @return Zero if bytes are equal, non-zero otherwise
*/
static inline int memcmp(const void * src, const void * dst, size_t cnt)
{
uint32_t d0, d1, d2;
int ret;
asm (
"repe cmpsb\n\t"
"je 1f\n\t"
"movl %3, %0\n\t"
"addl $1, %0\n\t"
"1:\n"
: "=a" (ret), "=&S" (d0), "=&D" (d1), "=&c" (d2)
: "0" (0), "1" ((unative_t) src), "2" ((unative_t) dst), "3" ((unative_t) cnt)
);
return ret;
}
 
/** Fill memory with words
* Fill a given number of words (2nd argument)
* at memory defined by 1st argument with the
* word value defined by 3rd argument.
*
* @param dst Destination
* @param cnt Number of words
* @param x Value to fill
*/
static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x)
{
uint32_t d0, d1;
asm volatile (
"rep stosw\n\t"
: "=&D" (d0), "=&c" (d1), "=&a" (x)
: "0" (dst), "1" (cnt), "2" (x)
: "memory"
);
 
}
 
/** Fill memory with bytes
* Fill a given number of bytes (2nd argument)
* at memory defined by 1st argument with the
* word value defined by 3rd argument.
*
* @param dst Destination
* @param cnt Number of bytes
* @param x Value to fill
*/
static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x)
{
uint32_t d0, d1;
asm volatile (
"rep stosb\n\t"
: "=&D" (d0), "=&c" (d1), "=&a" (x)
: "0" (dst), "1" (cnt), "2" (x)
: "memory"
);
 
}
 
#endif
 
/** @}
/branches/tracing/kernel/arch/ia32/include/smp/apic.h
105,8 → 105,8
#define MODEL_CLUSTER 0x0
 
/** Interrupt Command Register. */
#define ICRlo (0x300/sizeof(uint32_t))
#define ICRhi (0x310/sizeof(uint32_t))
#define ICRlo (0x300 / sizeof(uint32_t))
#define ICRhi (0x310 / sizeof(uint32_t))
typedef struct {
union {
uint32_t lo;
133,10 → 133,10
} __attribute__ ((packed)) icr_t;
 
/* End Of Interrupt. */
#define EOI (0x0b0/sizeof(uint32_t))
#define EOI (0x0b0 / sizeof(uint32_t))
 
/** Error Status Register. */
#define ESR (0x280/sizeof(uint32_t))
#define ESR (0x280 / sizeof(uint32_t))
typedef union {
uint32_t value;
uint8_t err_bitmap;
154,7 → 154,7
} esr_t;
 
/* Task Priority Register */
#define TPR (0x080/sizeof(uint32_t))
#define TPR (0x080 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
164,7 → 164,7
} tpr_t;
 
/** Spurious-Interrupt Vector Register. */
#define SVR (0x0f0/sizeof(uint32_t))
#define SVR (0x0f0 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
176,7 → 176,7
} svr_t;
 
/** Time Divide Configuration Register. */
#define TDCR (0x3e0/sizeof(uint32_t))
#define TDCR (0x3e0 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
186,13 → 186,13
} tdcr_t;
 
/* Initial Count Register for Timer */
#define ICRT (0x380/sizeof(uint32_t))
#define ICRT (0x380 / sizeof(uint32_t))
 
/* Current Count Register for Timer */
#define CCRT (0x390/sizeof(uint32_t))
#define CCRT (0x390 / sizeof(uint32_t))
 
/** LVT Timer register. */
#define LVT_Tm (0x320/sizeof(uint32_t))
#define LVT_Tm (0x320 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
207,8 → 207,8
} lvt_tm_t;
 
/** LVT LINT registers. */
#define LVT_LINT0 (0x350/sizeof(uint32_t))
#define LVT_LINT1 (0x360/sizeof(uint32_t))
#define LVT_LINT0 (0x350 / sizeof(uint32_t))
#define LVT_LINT1 (0x360 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
225,7 → 225,7
} lvt_lint_t;
 
/** LVT Error register. */
#define LVT_Err (0x370/sizeof(uint32_t))
#define LVT_Err (0x370 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
239,7 → 239,7
} lvt_error_t;
 
/** Local APIC ID Register. */
#define L_APIC_ID (0x020/sizeof(uint32_t))
#define L_APIC_ID (0x020 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
249,14 → 249,14
} l_apic_id_t;
 
/** Local APIC Version Register */
#define LAVR (0x030/sizeof(uint32_t))
#define LAVR (0x030 / sizeof(uint32_t))
#define LAVR_Mask 0xff
#define is_local_apic(x) (((x)&LAVR_Mask&0xf0)==0x1)
#define is_82489DX_apic(x) ((((x)&LAVR_Mask&0xf0)==0x0))
#define is_local_xapic(x) (((x)&LAVR_Mask)==0x14)
#define is_local_apic(x) (((x) & LAVR_Mask & 0xf0) == 0x1)
#define is_82489DX_apic(x) ((((x) & LAVR_Mask & 0xf0) == 0x0))
#define is_local_xapic(x) (((x) & LAVR_Mask) == 0x14)
 
/** Logical Destination Register. */
#define LDR (0x0d0/sizeof(uint32_t))
#define LDR (0x0d0 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
266,7 → 266,7
} ldr_t;
 
/** Destination Format Register. */
#define DFR (0x0e0/sizeof(uint32_t))
#define DFR (0x0e0 / sizeof(uint32_t))
typedef union {
uint32_t value;
struct {
276,8 → 276,8
} dfr_t;
 
/* IO APIC */
#define IOREGSEL (0x00/sizeof(uint32_t))
#define IOWIN (0x10/sizeof(uint32_t))
#define IOREGSEL (0x00 / sizeof(uint32_t))
#define IOWIN (0x10 / sizeof(uint32_t))
 
#define IOAPICID 0x00
#define IOAPICVER 0x01
/branches/tracing/kernel/arch/ia32/include/atomic.h
41,17 → 41,17
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock incl %0\n" : "=m" (val->count));
asm volatile ("lock incl %0\n" : "+m" (val->count));
#else
asm volatile ("incl %0\n" : "=m" (val->count));
asm volatile ("incl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
asm volatile ("lock decl %0\n" : "=m" (val->count));
asm volatile ("lock decl %0\n" : "+m" (val->count));
#else
asm volatile ("decl %0\n" : "=m" (val->count));
asm volatile ("decl %0\n" : "+m" (val->count));
#endif /* CONFIG_SMP */
}
 
61,7 → 61,7
 
asm volatile (
"lock xaddl %1, %0\n"
: "=m" (val->count), "+r" (r)
: "+m" (val->count), "+r" (r)
);
 
return r;
73,14 → 73,14
asm volatile (
"lock xaddl %1, %0\n"
: "=m" (val->count), "+r"(r)
: "+m" (val->count), "+r"(r)
);
return r;
}
 
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)
#define atomic_preinc(val) (atomic_postinc(val) + 1)
#define atomic_predec(val) (atomic_postdec(val) - 1)
 
static inline uint32_t test_and_set(atomic_t *val) {
uint32_t v;
88,7 → 88,7
asm volatile (
"movl $1, %0\n"
"xchgl %0, %1\n"
: "=r" (v),"=m" (val->count)
: "=r" (v),"+m" (val->count)
);
return v;
101,20 → 101,20
 
preemption_disable();
asm volatile (
"0:;"
"0:\n"
#ifdef CONFIG_HT
"pause;" /* Pentium 4's HT love this instruction */
"pause\n" /* Pentium 4's HT love this instruction */
#endif
"mov %0, %1;"
"testl %1, %1;"
"jnz 0b;" /* Lightweight looping on locked spinlock */
"mov %0, %1\n"
"testl %1, %1\n"
"jnz 0b\n" /* lightweight looping on locked spinlock */
"incl %1;" /* now use the atomic operation */
"xchgl %0, %1;"
"testl %1, %1;"
"jnz 0b;"
: "=m"(val->count),"=r"(tmp)
);
"incl %1\n" /* now use the atomic operation */
"xchgl %0, %1\n"
"testl %1, %1\n"
"jnz 0b\n"
: "+m" (val->count), "=r"(tmp)
);
/*
* Prevent critical section code from bleeding out this way up.
*/
/branches/tracing/kernel/arch/ia32/include/mm/page.h
128,6 → 128,8
 
#include <mm/mm.h>
#include <arch/interrupt.h>
#include <arch/types.h>
#include <typedefs.h>
 
/* Page fault error codes. */
 
/branches/tracing/kernel/arch/ia32/include/barrier.h
84,6 → 84,15
# endif
#endif
 
/*
* On ia32, the hardware takes care of instruction and data cache coherence,
* even on SMP systems. We issue a write barrier to make sure that writes
* queued in the store buffer drain to memory (even though it would be
* sufficient for them to drain to the D-cache).
*/
#define smc_coherence(a) write_barrier()
#define smc_coherence_block(a, l) write_barrier()
 
#endif
 
/** @}
/branches/tracing/kernel/arch/ia32/Makefile.inc
46,7 → 46,8
#
 
ifeq ($(MACHINE),athlon-xp)
CMN2 = -march=athlon-xp -mmmx -msse -m3dnow
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-3dnow
CMN2 = -march=athlon-xp
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=ssea
55,7 → 56,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),athlon-mp)
CMN2 = -march=athlon-mp -mmmx -msse -m3dnow
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-3dnow
CMN2 = -march=athlon-mp
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += xarch=ssea
63,7 → 65,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),pentium3)
CMN2 = -march=pentium3 -mmmx -msse
FPU_NO_CFLAGS = -mno-mmx -mno-sse
CMN2 = -march=pentium3
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=sse
71,7 → 74,8
CONFIG_HT = n
endif
ifeq ($(MACHINE),core)
CMN2 = -march=prescott -mfpmath=sse -mmmx -msse -msse2 -msse3
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-sse2 -mno-sse3
CMN2 = -march=prescott
GCC_CFLAGS += $(CMN2)
ICC_CFLAGS += $(CMN2)
SUNCC_CFLAGS += -xarch=sse3
78,7 → 82,8
DEFS += -DCONFIG_FENCES_P4
endif
ifeq ($(MACHINE),pentium4)
GCC_CFLAGS += -march=pentium4 -mfpmath=sse -mmmx -msse -msse2
FPU_NO_CFLAGS = -mno-mmx -mno-sse -mno-sse2
GCC_CFLAGS += -march=pentium4
ICC_CFLAGS += -march=pentium4
SUNCC_CFLAGS += -xarch=sse2
DEFS += -DCONFIG_FENCES_P4
/branches/tracing/kernel/arch/ia32/src/breakpoint.c
34,7 → 34,7
 
#include <arch/breakpoint.h>
#include <panic.h>
#include <console/klog.h>
#include <print.h>
#include <interrupt.h>
#include <func.h>
 
44,7 → 44,7
ASSERT(istate_from_uspace(istate));
 
(void)istate;
klog_printf("breakpoint exception\n");
printf("breakpoint exception\n");
interrupts_enable();
udebug_breakpoint_event(istate->eip);
}
/branches/tracing/kernel/arch/ia32/src/asm.S
37,6 → 37,8
.global paging_on
.global enable_l_apic_in_msr
.global interrupt_handlers
.global memsetb
.global memsetw
.global memcpy
.global memcpy_from_uspace
.global memcpy_from_uspace_failover_address
44,6 → 46,15
.global memcpy_to_uspace_failover_address
 
 
# Wrapper for generic memsetb
memsetb:
jmp _memsetb
 
# Wrapper for generic memsetw
memsetw:
jmp _memsetw
 
 
#define MEMCPY_DST 4
#define MEMCPY_SRC 8
#define MEMCPY_SIZE 12
/branches/tracing/kernel/arch/ia32/src/debug/panic.s
30,5 → 30,5
.global panic_printf
 
panic_printf:
movl $halt,(%esp) # fake stack to make printf return to halt
movl $halt, (%esp) # fake stack to make printf return to halt
jmp printf
/branches/tracing/kernel/arch/ia32/src/pm.c
112,7 → 112,7
 
void tss_initialize(tss_t *t)
{
memsetb((uintptr_t) t, sizeof(struct tss), 0);
memsetb(t, sizeof(struct tss), 0);
}
 
/*
240,7 → 240,7
preemption_disable();
ipl_t ipl = interrupts_disable();
memsetb((uintptr_t) idt, sizeof(idt), 0);
memsetb(idt, sizeof(idt), 0);
ptr_16_32_t idtr;
idtr.limit = sizeof(idt);
/branches/tracing/kernel/arch/ia32/src/smp/smp.c
160,8 → 160,7
panic("couldn't allocate memory for GDT\n");
 
memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor));
memsetb((uintptr_t)(&gdt_new[TSS_DES]),
sizeof(struct descriptor), 0);
memsetb(&gdt_new[TSS_DES], sizeof(struct descriptor), 0);
protected_ap_gdtr.limit = GDT_ITEMS * sizeof(struct descriptor);
protected_ap_gdtr.base = KA2PA((uintptr_t) gdt_new);
gdtr.base = (uintptr_t) gdt_new;
/branches/tracing/kernel/arch/ia32/src/smp/ap.S
45,7 → 45,7
KTEXT=8
KDATA=16
 
# This piece of code is real-mode and is meant to be alligned at 4K boundary.
# This piece of code is real-mode and is meant to be aligned at 4K boundary.
# The requirement for such an alignment comes from MP Specification's STARTUP IPI
# requirements.
 
/branches/tracing/kernel/arch/ia32/src/drivers/ega.c
115,7 → 115,7
return;
 
memcpy((void *) videoram, (void *) (videoram + ROW * 2), (SCREEN - ROW) * 2);
memsetw((uintptr_t) (videoram + (SCREEN - ROW) * 2), ROW, 0x0720);
memsetw(videoram + (SCREEN - ROW) * 2, ROW, 0x0720);
ega_cursor = ega_cursor - ROW;
}