Subversion Repositories HelenOS

Compare Revisions

Rev 2421 → Rev 2422

/branches/fs/kernel/test/synch/rwlock3.c
45,14 → 45,14
thread_detach(THREAD);
if (!sh_quiet)
printf("cpu%d, tid %d: trying to lock rwlock for reading....\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu: trying to lock rwlock for reading....\n", CPU->id, THREAD->tid);
rwlock_read_lock(&rwlock);
rwlock_read_unlock(&rwlock);
if (!sh_quiet) {
printf("cpu%d, tid %d: success\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %d: trying to lock rwlock for writing....\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu: success\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu: trying to lock rwlock for writing....\n", CPU->id, THREAD->tid);
}
 
rwlock_write_lock(&rwlock);
59,7 → 59,7
rwlock_write_unlock(&rwlock);
if (!sh_quiet)
printf("cpu%d, tid %d: success\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu: success\n", CPU->id, THREAD->tid);
atomic_dec(&thread_count);
}
/branches/fs/kernel/test/synch/rwlock4.c
74,18 → 74,18
to = random(40000);
if (!sh_quiet)
printf("cpu%d, tid %d w+ (%d)\n", CPU->id, THREAD->tid, to);
printf("cpu%d, tid %llu w+ (%d)\n", CPU->id, THREAD->tid, to);
rc = rwlock_write_lock_timeout(&rwlock, to);
if (SYNCH_FAILED(rc)) {
if (!sh_quiet)
printf("cpu%d, tid %d w!\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu w!\n", CPU->id, THREAD->tid);
atomic_dec(&thread_count);
return;
}
if (!sh_quiet)
printf("cpu%d, tid %d w=\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu w=\n", CPU->id, THREAD->tid);
 
if (rwlock.readers_in) {
if (!sh_quiet)
106,7 → 106,7
rwlock_write_unlock(&rwlock);
if (!sh_quiet)
printf("cpu%d, tid %d w-\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu w-\n", CPU->id, THREAD->tid);
atomic_dec(&thread_count);
}
 
119,24 → 119,24
to = random(2000);
if (!sh_quiet)
printf("cpu%d, tid %d r+ (%d)\n", CPU->id, THREAD->tid, to);
printf("cpu%d, tid %llu r+ (%d)\n", CPU->id, THREAD->tid, to);
rc = rwlock_read_lock_timeout(&rwlock, to);
if (SYNCH_FAILED(rc)) {
if (!sh_quiet)
printf("cpu%d, tid %d r!\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu r!\n", CPU->id, THREAD->tid);
atomic_dec(&thread_count);
return;
}
if (!sh_quiet)
printf("cpu%d, tid %d r=\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu r=\n", CPU->id, THREAD->tid);
thread_usleep(30000);
rwlock_read_unlock(&rwlock);
if (!sh_quiet)
printf("cpu%d, tid %d r-\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu r-\n", CPU->id, THREAD->tid);
atomic_dec(&thread_count);
}
 
/branches/fs/kernel/test/synch/semaphore2.c
67,18 → 67,18
waitq_sleep(&can_start);
to = random(20000);
printf("cpu%d, tid %d down+ (%d)\n", CPU->id, THREAD->tid, to);
printf("cpu%d, tid %llu down+ (%d)\n", CPU->id, THREAD->tid, to);
rc = semaphore_down_timeout(&sem, to);
if (SYNCH_FAILED(rc)) {
printf("cpu%d, tid %d down!\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu down!\n", CPU->id, THREAD->tid);
return;
}
printf("cpu%d, tid %d down=\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu down=\n", CPU->id, THREAD->tid);
thread_usleep(random(30000));
semaphore_up(&sem);
printf("cpu%d, tid %d up\n", CPU->id, THREAD->tid);
printf("cpu%d, tid %llu up\n", CPU->id, THREAD->tid);
}
 
char * test_semaphore2(bool quiet)
/branches/fs/kernel/test/thread/thread1.c
48,8 → 48,8
 
while (atomic_get(&finish)) {
if (!sh_quiet)
printf("%d\n", (int) (THREAD->tid));
thread_usleep(100);
printf("%llu ", THREAD->tid);
thread_usleep(100000);
}
atomic_inc(&threads_finished);
}
/branches/fs/kernel/test/mm/falloc2.c
58,7 → 58,7
uintptr_t * frames = (uintptr_t *) malloc(MAX_FRAMES * sizeof(uintptr_t), FRAME_ATOMIC);
if (frames == NULL) {
if (!sh_quiet)
printf("Thread #%d (cpu%d): Unable to allocate frames\n", THREAD->tid, CPU->id);
printf("Thread #%llu (cpu%d): Unable to allocate frames\n", THREAD->tid, CPU->id);
atomic_inc(&thread_fail);
atomic_dec(&thread_count);
return;
69,7 → 69,7
for (run = 0; run < THREAD_RUNS; run++) {
for (order = 0; order <= MAX_ORDER; order++) {
if (!sh_quiet)
printf("Thread #%d (cpu%d): Allocating %d frames blocks ... \n", THREAD->tid, CPU->id, 1 << order);
printf("Thread #%llu (cpu%d): Allocating %d frames blocks ... \n", THREAD->tid, CPU->id, 1 << order);
allocated = 0;
for (i = 0; i < (MAX_FRAMES >> order); i++) {
82,16 → 82,16
}
if (!sh_quiet)
printf("Thread #%d (cpu%d): %d blocks allocated.\n", THREAD->tid, CPU->id, allocated);
printf("Thread #%llu (cpu%d): %d blocks allocated.\n", THREAD->tid, CPU->id, allocated);
if (!sh_quiet)
printf("Thread #%d (cpu%d): Deallocating ... \n", THREAD->tid, CPU->id);
printf("Thread #%llu (cpu%d): Deallocating ... \n", THREAD->tid, CPU->id);
for (i = 0; i < allocated; i++) {
for (k = 0; k <= ((FRAME_SIZE << order) - 1); k++) {
if (((uint8_t *) frames[i])[k] != val) {
if (!sh_quiet)
printf("Thread #%d (cpu%d): Unexpected data (%d) in block %p offset %#zx\n", THREAD->tid, CPU->id, ((char *) frames[i])[k], frames[i], k);
printf("Thread #%llu (cpu%d): Unexpected data (%d) in block %p offset %#zx\n", THREAD->tid, CPU->id, ((char *) frames[i])[k], frames[i], k);
atomic_inc(&thread_fail);
goto cleanup;
}
100,7 → 100,7
}
if (!sh_quiet)
printf("Thread #%d (cpu%d): Finished run.\n", THREAD->tid, CPU->id);
printf("Thread #%llu (cpu%d): Finished run.\n", THREAD->tid, CPU->id);
}
}
 
108,7 → 108,7
free(frames);
if (!sh_quiet)
printf("Thread #%d (cpu%d): Exiting\n", THREAD->tid, CPU->id);
printf("Thread #%llu (cpu%d): Exiting\n", THREAD->tid, CPU->id);
atomic_dec(&thread_count);
}
 
/branches/fs/kernel/test/mm/slab1.c
137,7 → 137,7
thread_detach(THREAD);
if (!sh_quiet)
printf("Starting thread #%d...\n", THREAD->tid);
printf("Starting thread #%llu...\n", THREAD->tid);
for (j = 0; j < 10; j++) {
for (i = 0; i < THR_MEM_COUNT; i++)
151,7 → 151,7
}
if (!sh_quiet)
printf("Thread #%d finished\n", THREAD->tid);
printf("Thread #%llu finished\n", THREAD->tid);
semaphore_up(&thr_sem);
}
/branches/fs/kernel/test/mm/slab2.c
150,11 → 150,11
mutex_unlock(&starter_mutex);
if (!sh_quiet)
printf("Starting thread #%d...\n",THREAD->tid);
printf("Starting thread #%llu...\n",THREAD->tid);
 
/* Alloc all */
if (!sh_quiet)
printf("Thread #%d allocating...\n", THREAD->tid);
printf("Thread #%llu allocating...\n", THREAD->tid);
while (1) {
/* Call with atomic to detect end of memory */
166,7 → 166,7
}
if (!sh_quiet)
printf("Thread #%d releasing...\n", THREAD->tid);
printf("Thread #%llu releasing...\n", THREAD->tid);
while (data) {
new = *((void **)data);
176,7 → 176,7
}
if (!sh_quiet)
printf("Thread #%d allocating...\n", THREAD->tid);
printf("Thread #%llu allocating...\n", THREAD->tid);
while (1) {
/* Call with atomic to detect end of memory */
188,7 → 188,7
}
if (!sh_quiet)
printf("Thread #%d releasing...\n", THREAD->tid);
printf("Thread #%llu releasing...\n", THREAD->tid);
while (data) {
new = *((void **)data);
198,7 → 198,7
}
if (!sh_quiet)
printf("Thread #%d finished\n", THREAD->tid);
printf("Thread #%llu finished\n", THREAD->tid);
slab_print_list();
semaphore_up(&thr_sem);
/branches/fs/kernel/test/fpu/mips2.c
72,7 → 72,7
if (arg != after_arg) {
if (!sh_quiet)
printf("General reg tid%d: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
printf("General reg tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
atomic_inc(&threads_fault);
break;
}
104,7 → 104,7
if (arg != after_arg) {
if (!sh_quiet)
printf("General reg tid%d: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
printf("General reg tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
atomic_inc(&threads_fault);
break;
}
/branches/fs/kernel/test/fpu/fpu1.c
126,7 → 126,7
 
if ((int) (100000000 * e) != E_10e8) {
if (!sh_quiet)
printf("tid%d: e*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (100000000 * e), (unative_t) E_10e8);
printf("tid%llu: e*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (100000000 * e), (unative_t) E_10e8);
atomic_inc(&threads_fault);
break;
}
161,7 → 161,7
#ifdef KERN_ia64_ARCH_H_
if ((int) (1000000 * pi) != PI_10e8) {
if (!sh_quiet)
printf("tid%d: pi*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (1000000 * pi), (unative_t) (PI_10e8 / 100));
printf("tid%llu: pi*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (1000000 * pi), (unative_t) (PI_10e8 / 100));
atomic_inc(&threads_fault);
break;
}
168,7 → 168,7
#else
if ((int) (100000000 * pi) != PI_10e8) {
if (!sh_quiet)
printf("tid%d: pi*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (100000000 * pi), (unative_t) PI_10e8);
printf("tid%llu: pi*10e8=%zd should be %zd\n", THREAD->tid, (unative_t) (100000000 * pi), (unative_t) PI_10e8);
atomic_inc(&threads_fault);
break;
}
/branches/fs/kernel/test/fpu/sse1.c
72,7 → 72,7
if (arg != after_arg) {
if (!sh_quiet)
printf("tid%d: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
printf("tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
atomic_inc(&threads_fault);
break;
}
104,7 → 104,7
if (arg != after_arg) {
if (!sh_quiet)
printf("tid%d: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
printf("tid%llu: arg(%d) != %d\n", THREAD->tid, arg, after_arg);
atomic_inc(&threads_fault);
break;
}
/branches/fs/kernel/test/print/print1.c
25,6 → 25,7
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <print.h>
#include <test.h>
 
/branches/fs/kernel/kernel.config
92,9 → 92,6
# Lazy FPU context switching
! [(ARCH=mips32&MACHINE!=msim&MACHINE!=simics)|ARCH=amd64|ARCH=ia32|ARCH=ia64|ARCH=sparc64|ARCH=ia32xen] CONFIG_FPU_LAZY (y/n)
 
# Power off on halt
! [ARCH=ppc32] CONFIG_POWEROFF (n/y)
 
# Use VHPT
! [ARCH=ia64] CONFIG_VHPT (n/y)
 
/branches/fs/kernel/genarch/src/kbd/i8042.c
74,8 → 74,8
#define i8042_COMMAND 0x69
 
#define i8042_BUFFER_FULL_MASK 0x01
#define i8042_WAIT_MASK 0x02
#define i8042_MOUSE_DATA 0x20
#define i8042_WAIT_MASK 0x02
#define i8042_MOUSE_DATA 0x20
 
static void i8042_suspend(chardev_t *);
static void i8042_resume(chardev_t *);
90,23 → 90,10
static irq_t i8042_kbd_irq;
static irq_t i8042_mouse_irq;
 
/** Wait until the controller reads its data. */
static void i8042_wait(void) {
while (i8042_status_read() & i8042_WAIT_MASK) {
/* wait */
}
}
 
void i8042_grab(void)
{
ipl_t ipl = interrupts_disable();
i8042_wait();
i8042_command_write(i8042_SET_COMMAND);
i8042_wait();
i8042_data_write(i8042_COMMAND);
i8042_wait();
 
spinlock_lock(&i8042_kbd_irq.lock);
i8042_kbd_irq.notif_cfg.notify = false;
spinlock_unlock(&i8042_kbd_irq.lock);
140,34 → 127,28
return IRQ_ACCEPT;
}
 
static void i8042_kbd_irq_handler(irq_t *irq, void *arg, ...)
static void i8042_irq_handler(irq_t *irq, void *arg, ...)
{
if (irq->notif_cfg.notify && irq->notif_cfg.answerbox)
ipc_irq_send_notif(irq);
else {
uint8_t x;
uint8_t data;
uint8_t status;
while (((status = i8042_status_read()) & i8042_BUFFER_FULL_MASK)) {
x = i8042_data_read();
data = i8042_data_read();
if ((status & i8042_MOUSE_DATA))
continue;
if (x & KEY_RELEASE)
key_released(x ^ KEY_RELEASE);
 
if (data & KEY_RELEASE)
key_released(data ^ KEY_RELEASE);
else
key_pressed(x);
key_pressed(data);
}
}
}
 
static void i8042_mouse_irq_handler(irq_t *irq, void *arg, ...)
{
if (irq->notif_cfg.notify && irq->notif_cfg.answerbox)
ipc_irq_send_notif(irq);
}
 
/** Initialize i8042. */
void i8042_init(devno_t kbd_devno, inr_t kbd_inr, devno_t mouse_devno, inr_t mouse_inr)
{
178,7 → 159,7
i8042_kbd_irq.devno = kbd_devno;
i8042_kbd_irq.inr = kbd_inr;
i8042_kbd_irq.claim = i8042_claim;
i8042_kbd_irq.handler = i8042_kbd_irq_handler;
i8042_kbd_irq.handler = i8042_irq_handler;
irq_register(&i8042_kbd_irq);
irq_initialize(&i8042_mouse_irq);
185,7 → 166,7
i8042_mouse_irq.devno = mouse_devno;
i8042_mouse_irq.inr = mouse_inr;
i8042_mouse_irq.claim = i8042_claim;
i8042_mouse_irq.handler = i8042_mouse_irq_handler;
i8042_mouse_irq.handler = i8042_irq_handler;
irq_register(&i8042_mouse_irq);
trap_virtual_enable_irqs(1 << kbd_inr);
/branches/fs/kernel/genarch/src/fb/fb.c
511,8 → 511,6
sysinfo_set_item_val("fb.scanline", NULL, scan);
sysinfo_set_item_val("fb.visual", NULL, visual);
sysinfo_set_item_val("fb.address.physical", NULL, addr);
sysinfo_set_item_val("fb.address.color", NULL,
PAGE_COLOR((uintptr_t) fbaddress));
sysinfo_set_item_val("fb.invert-colors", NULL, invert_colors);
 
/* Allocate double buffer */
/branches/fs/kernel/genarch/src/mm/asid.c
62,15 → 62,9
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <arch.h>
#include <adt/list.h>
#include <debug.h>
 
/**
* asidlock protects the asids_allocated counter.
*/
SPINLOCK_INITIALIZE(asidlock);
 
static count_t asids_allocated = 0;
 
/** Allocate free address space identifier.
90,7 → 84,6
* Check if there is an unallocated ASID.
*/
spinlock_lock(&asidlock);
if (asids_allocated == ASIDS_ALLOCABLE) {
 
/*
108,7 → 101,6
list_remove(tmp);
as = list_get_instance(tmp, as_t, inactive_as_with_asid_link);
mutex_lock_active(&as->lock);
 
/*
* Steal the ASID.
130,8 → 122,6
*/
as_invalidate_translation_cache(as, 0, (count_t) -1);
mutex_unlock(&as->lock);
 
/*
* Rid the system of the stolen ASID.
*/
156,8 → 146,6
tlb_shootdown_finalize();
}
spinlock_unlock(&asidlock);
return asid;
}
 
170,16 → 158,8
*/
void asid_put(asid_t asid)
{
ipl_t ipl;
 
ipl = interrupts_disable();
spinlock_lock(&asidlock);
 
asids_allocated--;
asid_put_arch(asid);
spinlock_unlock(&asidlock);
interrupts_restore(ipl);
}
 
/** @}
/branches/fs/kernel/genarch/src/mm/page_ht.c
55,7 → 55,8
static bool compare(unative_t key[], count_t keys, link_t *item);
static void remove_callback(link_t *item);
 
static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags);
static void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
int flags);
static void ht_mapping_remove(as_t *as, uintptr_t page);
static pte_t *ht_mapping_find(as_t *as, uintptr_t page);
 
103,7 → 104,7
* of occurring. Least significant bits of VPN compose the
* hash index.
*/
index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES-1));
index = ((page >> PAGE_WIDTH) & (PAGE_HT_ENTRIES - 1));
/*
* Address space structures are likely to be allocated from
110,7 → 111,7
* similar addresses. Least significant bits compose the
* hash index.
*/
index |= ((unative_t) as) & (PAGE_HT_ENTRIES-1);
index |= ((unative_t) as) & (PAGE_HT_ENTRIES - 1);
return index;
}
136,7 → 137,8
t = hash_table_get_instance(item, pte_t, link);
 
if (keys == PAGE_HT_KEYS) {
return (key[KEY_AS] == (uintptr_t) t->as) && (key[KEY_PAGE] == t->page);
return (key[KEY_AS] == (uintptr_t) t->as) &&
(key[KEY_PAGE] == t->page);
} else {
return (key[KEY_AS] == (uintptr_t) t->as);
}
175,7 → 177,10
void ht_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int flags)
{
pte_t *t;
unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
unative_t key[2] = {
(uintptr_t) as,
page = ALIGN_DOWN(page, PAGE_SIZE)
};
if (!hash_table_find(&page_ht, key)) {
t = (pte_t *) malloc(sizeof(pte_t), FRAME_ATOMIC);
209,7 → 214,10
*/
void ht_mapping_remove(as_t *as, uintptr_t page)
{
unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
unative_t key[2] = {
(uintptr_t) as,
page = ALIGN_DOWN(page, PAGE_SIZE)
};
/*
* Note that removed PTE's will be freed
234,7 → 242,10
{
link_t *hlp;
pte_t *t = NULL;
unative_t key[2] = { (uintptr_t) as, page = ALIGN_DOWN(page, PAGE_SIZE) };
unative_t key[2] = {
(uintptr_t) as,
page = ALIGN_DOWN(page, PAGE_SIZE)
};
hlp = hash_table_find(&page_ht, key);
if (hlp)
/branches/fs/kernel/genarch/src/acpi/acpi.c
88,7 → 88,7
 
static void map_sdt(struct acpi_sdt_header *sdt)
{
page_mapping_insert(AS_KERNEL, (uintptr_t) sdt, (uintptr_t) sdt, PAGE_NOT_CACHEABLE);
page_mapping_insert(AS_KERNEL, (uintptr_t) sdt, (uintptr_t) sdt, PAGE_NOT_CACHEABLE | PAGE_WRITE);
map_structure((uintptr_t) sdt, sdt->length);
}
 
/branches/fs/kernel/generic/include/print.h
40,7 → 40,7
#include <arch/arg.h>
 
/* We need this address in the spinlock code to avoid deadlock in deadlock detection */
SPINLOCK_EXTERN(printflock);
SPINLOCK_EXTERN(printf_lock);
 
#define EOF (-1)
 
/branches/fs/kernel/generic/include/time/clock.h
35,8 → 35,19
#ifndef KERN_CLOCK_H_
#define KERN_CLOCK_H_
 
#include <arch/types.h>
 
#define HZ 100
 
/** Uptime structure */
typedef struct {
unative_t seconds1;
unative_t useconds;
unative_t seconds2;
} uptime_t;
 
extern uptime_t *uptime;
 
extern void clock(void);
extern void clock_counter_init(void);
 
/branches/fs/kernel/generic/include/proc/task.h
90,10 → 90,10
* Active asynchronous messages. It is used for limiting uspace to
* certain extent.
*/
atomic_t active_calls;
atomic_t active_calls;
/** Architecture specific task data. */
task_arch_t arch;
task_arch_t arch;
/**
* Serializes access to the B+tree of task's futexes. This mutex is
111,6 → 111,7
extern btree_t tasks_btree;
 
extern void task_init(void);
extern void task_done(void);
extern task_t *task_create(as_t *as, char *name);
extern void task_destroy(task_t *t);
extern task_t *task_run_program(void *program_addr, char *name);
/branches/fs/kernel/generic/include/proc/thread.h
53,7 → 53,11
 
/* Thread flags */
 
/** Thread cannot be migrated to another CPU. */
/** Thread cannot be migrated to another CPU.
*
* When using this flag, the caller must set cpu in the thread_t
* structure manually before calling thread_ready (even on uniprocessor).
*/
#define THREAD_FLAG_WIRED (1 << 0)
/** Thread was migrated to another CPU and has not run yet. */
#define THREAD_FLAG_STOLEN (1 << 1)
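The wiring contract documented above is easy to miss, so here is a minimal sketch of a compliant caller (worker is a placeholder thread function; this mirrors what the mcall0 command later in this revision does):

static void worker(void *arg); /* placeholder */
thread_t *t = thread_create(worker, NULL, TASK, THREAD_FLAG_WIRED, "wired", false);
if (t) {
spinlock_lock(&t->lock);
t->cpu = &cpus[0]; /* must be set before thread_ready(), even on uniprocessor */
spinlock_unlock(&t->lock);
thread_ready(t);
}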
193,7 → 197,7
/** Thread's priority. Implemented as index to CPU->rq */
int priority;
/** Thread ID. */
uint32_t tid;
thread_id_t tid;
/** Architecture-specific data. */
thread_arch_t arch;
248,8 → 252,9
extern slab_cache_t *fpu_context_slab;
 
/* Thread syscall prototypes. */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name);
unative_t sys_thread_exit(int uspace_status);
extern unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name, thread_id_t *uspace_thread_id);
extern unative_t sys_thread_exit(int uspace_status);
extern unative_t sys_thread_get_id(thread_id_t *uspace_thread_id);
 
#endif
 
/branches/fs/kernel/generic/include/interrupt.h
48,7 → 48,7
#define fault_if_from_uspace(istate, cmd, ...) \
{ \
if (istate_from_uspace(istate)) { \
klog_printf("Task %lld killed due to an exception at %p.", TASK->taskid, istate_get_pc(istate)); \
klog_printf("Task %llu killed due to an exception at %p.", TASK->taskid, istate_get_pc(istate)); \
klog_printf(" " cmd, ##__VA_ARGS__); \
task_kill(TASK->taskid); \
thread_exit(); \
/branches/fs/kernel/generic/include/synch/mutex.h
44,13 → 44,11
} mutex_t;
 
#define mutex_lock(mtx) \
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
#define mutex_trylock(mtx) \
_mutex_lock_timeout((mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
#define mutex_lock_timeout(mtx,usec) \
_mutex_lock_timeout((mtx),(usec),SYNCH_FLAGS_NON_BLOCKING)
#define mutex_lock_active(mtx) \
while (mutex_trylock((mtx)) != ESYNCH_OK_ATOMIC)
_mutex_lock_timeout((mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING)
#define mutex_lock_timeout(mtx, usec) \
_mutex_lock_timeout((mtx), (usec), SYNCH_FLAGS_NON_BLOCKING)
 
extern void mutex_initialize(mutex_t *mtx);
extern int _mutex_lock_timeout(mutex_t *mtx, uint32_t usec, int flags);
/branches/fs/kernel/generic/include/synch/spinlock.h
101,8 → 101,26
preemption_enable();
}
 
#ifdef CONFIG_DEBUG_SPINLOCK
 
extern int printf(const char *, ...);
 
#define DEADLOCK_THRESHOLD 100000000
#define DEADLOCK_PROBE_INIT(pname) count_t pname = 0
#define DEADLOCK_PROBE(pname, value) \
if ((pname)++ > (value)) { \
(pname) = 0; \
printf("Deadlock probe %s: exceeded threshold %d\n" \
"cpu%d: function=%s, line=%d\n", \
#pname, (value), CPU->id, __FUNCTION__, __LINE__); \
}
#else
#define DEADLOCK_PROBE_INIT(pname)
#define DEADLOCK_PROBE(pname, value)
#endif
 
#else
 
/* On UP systems, spinlocks are effectively left out. */
#define SPINLOCK_DECLARE(name)
#define SPINLOCK_EXTERN(name)
113,6 → 131,9
#define spinlock_trylock(x) (preemption_disable(), 1)
#define spinlock_unlock(x) preemption_enable()
 
#define DEADLOCK_PROBE_INIT(pname)
#define DEADLOCK_PROBE(pname, value)
 
#endif
 
#endif
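As a usage sketch (lock_both and its two locks are hypothetical; the waitq.c and timeout.c hunks below apply the same pattern), the probe wraps a trylock retry loop so that spinning past the threshold gets reported instead of hanging silently:

static void lock_both(spinlock_t *first, spinlock_t *second)
{
DEADLOCK_PROBE_INIT(p_second);
grab_locks:
spinlock_lock(first);
if (!spinlock_trylock(second)) {
spinlock_unlock(first);
DEADLOCK_PROBE(p_second, DEADLOCK_THRESHOLD);
goto grab_locks; /* back off and retry to avoid deadlock */
}
/* ... critical section with both locks held ... */
spinlock_unlock(second);
spinlock_unlock(first);
}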
/branches/fs/kernel/generic/include/synch/waitq.h
40,8 → 40,10
#include <synch/synch.h>
#include <adt/list.h>
 
#define WAKEUP_FIRST 0
#define WAKEUP_ALL 1
typedef enum {
WAKEUP_FIRST = 0,
WAKEUP_ALL
} wakeup_mode_t;
 
/** Wait queue structure. */
typedef struct {
70,8 → 72,8
extern ipl_t waitq_sleep_prepare(waitq_t *wq);
extern int waitq_sleep_timeout_unsafe(waitq_t *wq, uint32_t usec, int flags);
extern void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl);
extern void waitq_wakeup(waitq_t *wq, bool all);
extern void _waitq_wakeup_unsafe(waitq_t *wq, bool all);
extern void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode);
extern void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode);
extern void waitq_interrupt_sleep(struct thread *t);
 
#endif
/branches/fs/kernel/generic/include/ddi/irq.h
121,6 → 121,14
* this lock must not be taken first.
*/
SPINLOCK_DECLARE(lock);
/** Send EOI before processing the interrupt.
* This is essential for the timer interrupt, which
* has to be acknowledged before preemption takes
* place, so that another timer interrupt will
* eventually be generated.
*/
bool preack;
 
/** Unique device number. -1 if not yet assigned. */
devno_t devno;
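Only the flag is declared in this hunk; an architecture interrupt path would be expected to consume it roughly as follows (a sketch only — eoi_send() stands in for the arch-specific acknowledgment, and irq->arg is assumed to carry the handler argument; neither is part of this diff):

if (irq->preack)
eoi_send(); /* acknowledge early so the next timer tick can be generated */
irq->handler(irq, irq->arg);
if (!irq->preack)
eoi_send();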
127,7 → 135,7
 
/** Actual IRQ number. -1 if not yet assigned. */
inr_t inr;
/** Trigger level of the IRQ.*/
/** Trigger level of the IRQ. */
irq_trigger_t trigger;
/** Claim ownership of the IRQ. */
irq_ownership_t (* claim)(void);
/branches/fs/kernel/generic/include/printf/printf_core.h
47,7 → 47,7
 
};
 
int printf_core(const char *fmt, struct printf_spec *ps ,va_list ap);
int printf_core(const char *fmt, struct printf_spec *ps, va_list ap);
 
#endif
 
/branches/fs/kernel/generic/include/arch.h
74,8 → 74,12
extern void arch_post_cpu_init(void);
extern void arch_pre_smp_init(void);
extern void arch_post_smp_init(void);
 
extern void calibrate_delay_loop(void);
 
extern void reboot(void);
extern void arch_reboot(void);
 
#endif
 
/** @}
/branches/fs/kernel/generic/include/mm/as.h
89,24 → 89,26
@public
/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
/**
* Number of processors on which this address space is active.
* Protected by asidlock.
*/
count_t cpu_refcount;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
* Protected by asidlock.
*/
asid_t asid;
/** Number of references (i.e. tasks that reference this as). */
atomic_t refcount;
 
mutex_t lock;
/** Number of references (i.e. tasks that reference this as). */
count_t refcount;
/** Number of processors on which this address space is active. */
count_t cpu_refcount;
/** B+tree of address space areas. */
btree_t as_area_btree;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
*/
asid_t asid;
/** Non-generic content. */
as_genarch_t genarch;
133,24 → 135,26
typedef struct as {
/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
/**
* Number of processors on which this address space is active.
* Protected by asidlock.
*/
count_t cpu_refcount;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
* Protected by asidlock.
*/
asid_t asid;
 
mutex_t lock;
 
/** Number of references (i.e. tasks that reference this as). */
count_t refcount;
atomic_t refcount;
 
/** Number of processors on which this address space is active. */
count_t cpu_refcount;
mutex_t lock;
 
/** B+tree of address space areas. */
btree_t as_area_btree;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
*/
asid_t asid;
/** Non-generic content. */
as_genarch_t genarch;
 
205,7 → 209,6
/** Address space area structure.
*
* Each as_area_t structure describes one contiguous area of virtual memory.
* In the future, it should not be difficult to support shared areas.
*/
typedef struct {
mutex_t lock;
250,7 → 253,6
extern as_operations_t *as_operations;
#endif
 
SPINLOCK_EXTERN(inactive_as_with_asid_lock);
extern link_t inactive_as_with_asid_head;
 
extern void as_init(void);
/branches/fs/kernel/generic/include/syscall/syscall.h
40,6 → 40,7
SYS_TLS_SET = 1, /* Hardcoded in AMD64, IA32 uspace - psthread.S */
SYS_THREAD_CREATE,
SYS_THREAD_EXIT,
SYS_THREAD_GET_ID,
SYS_TASK_GET_ID,
SYS_FUTEX_SLEEP,
SYS_FUTEX_WAKEUP,
/branches/fs/kernel/generic/src/synch/spinlock.c
73,7 → 73,6
* @param sl Pointer to spinlock_t structure.
*/
#ifdef CONFIG_DEBUG_SPINLOCK
#define DEADLOCK_THRESHOLD 100000000
void spinlock_lock_debug(spinlock_t *sl)
{
count_t i = 0;
84,7 → 83,7
while (test_and_set(&sl->val)) {
 
/*
* We need to be careful about printflock and fb_lock.
* We need to be careful about printf_lock and fb_lock.
* Both of them are used to report deadlocks via
* printf() and fb_putchar().
*
94,13 → 93,13
* However, we encountered false positives caused by very
* slow VESA framebuffer interaction (especially when
* run in a simulator) that caused problems with both
* printflock and fb_lock.
* printf_lock and fb_lock.
*
* Possible deadlocks on both printflock and fb_lock
* Possible deadlocks on both printf_lock and fb_lock
* are therefore not reported as they would cause an
* infinite recursion.
*/
if (sl == &printflock)
if (sl == &printf_lock)
continue;
#ifdef CONFIG_FB
if (sl == &fb_lock)
/branches/fs/kernel/generic/src/synch/waitq.c
86,6 → 86,7
thread_t *t = (thread_t *) data;
waitq_t *wq;
bool do_wakeup = false;
DEADLOCK_PROBE_INIT(p_wqlock);
 
spinlock_lock(&threads_lock);
if (!thread_exists(t))
96,6 → 97,7
if ((wq = t->sleep_queue)) { /* assignment */
if (!spinlock_trylock(&wq->lock)) {
spinlock_unlock(&t->lock);
DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
goto grab_locks; /* avoid deadlock */
}
 
128,6 → 130,7
waitq_t *wq;
bool do_wakeup = false;
ipl_t ipl;
DEADLOCK_PROBE_INIT(p_wqlock);
 
ipl = interrupts_disable();
spinlock_lock(&threads_lock);
147,6 → 150,7
if (!spinlock_trylock(&wq->lock)) {
spinlock_unlock(&t->lock);
DEADLOCK_PROBE(p_wqlock, DEADLOCK_THRESHOLD);
goto grab_locks; /* avoid deadlock */
}
 
379,11 → 383,10
* Besides its 'normal' wakeup operation, it attempts to unregister possible
* timeout.
*
* @param wq Pointer to wait queue.
* @param all If this is non-zero, all sleeping threads will be woken up and
* missed count will be zeroed.
* @param wq Pointer to wait queue.
* @param mode Wakeup mode.
*/
void waitq_wakeup(waitq_t *wq, bool all)
void waitq_wakeup(waitq_t *wq, wakeup_mode_t mode)
{
ipl_t ipl;
 
390,10 → 393,10
ipl = interrupts_disable();
spinlock_lock(&wq->lock);
 
_waitq_wakeup_unsafe(wq, all);
_waitq_wakeup_unsafe(wq, mode);
 
spinlock_unlock(&wq->lock);
interrupts_restore(ipl);
spinlock_unlock(&wq->lock);
interrupts_restore(ipl);
}
 
/** Internal SMP- and IRQ-unsafe version of waitq_wakeup()
401,22 → 404,27
* This is the internal SMP- and IRQ-unsafe version of waitq_wakeup(). It
* assumes wq->lock is already locked and interrupts are already disabled.
*
* @param wq Pointer to wait queue.
* @param all If this is non-zero, all sleeping threads will be woken up and
* missed count will be zeroed.
* @param wq Pointer to wait queue.
* @param mode If mode is WAKEUP_FIRST, then the longest waiting thread,
* if any, is woken up. If mode is WAKEUP_ALL, then all
* waiting threads, if any, are woken up. If there are no
* waiting threads to be woken up, the missed wakeup is
* recorded in the wait queue.
*/
void _waitq_wakeup_unsafe(waitq_t *wq, bool all)
void _waitq_wakeup_unsafe(waitq_t *wq, wakeup_mode_t mode)
{
thread_t *t;
count_t count = 0;
 
loop:
if (list_empty(&wq->head)) {
wq->missed_wakeups++;
if (all)
wq->missed_wakeups = 0;
if (count && mode == WAKEUP_ALL)
wq->missed_wakeups--;
return;
}
 
count++;
t = list_get_instance(wq->head.next, thread_t, wq_link);
/*
445,7 → 453,7
 
thread_ready(t);
 
if (all)
if (mode == WAKEUP_ALL)
goto loop;
}
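Call sites now state their intent explicitly instead of passing a bare boolean; for example (wq is a placeholder waitq_t):

waitq_wakeup(&wq, WAKEUP_FIRST); /* wake only the longest-waiting thread */
waitq_wakeup(&wq, WAKEUP_ALL); /* wake all sleeping threads */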
 
/branches/fs/kernel/generic/src/main/kinit.c
118,7 → 118,7
 
#ifdef CONFIG_SMP
if (config.cpu_count > 1) {
unsigned int i;
count_t i;
/*
* For each CPU, create its load balancing thread.
167,6 → 167,7
 
task_t *utask = task_run_program((void *) init.tasks[i].addr,
"uspace");
if (utask) {
/*
* Set capabilities to init userspace tasks.
/branches/fs/kernel/generic/src/main/shutdown.c
0,0 → 1,53
/*
* Copyright (c) 2007 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup main
* @{
*/
 
/**
* @file
* @brief Shutdown procedures.
*/
 
#include <arch.h>
#include <print.h>
 
void reboot(void)
{
task_done();
#ifdef CONFIG_DEBUG
printf("Rebooting the system\n");
#endif
arch_reboot();
}
 
/** @}
*/
/branches/fs/kernel/generic/src/time/timeout.c
45,7 → 45,6
#include <arch/asm.h>
#include <arch.h>
 
 
/** Initialize timeouts
*
* Initialize kernel timeouts.
175,6 → 174,7
timeout_t *hlp;
link_t *l;
ipl_t ipl;
DEADLOCK_PROBE_INIT(p_tolock);
 
grab_locks:
ipl = interrupts_disable();
186,7 → 186,8
}
if (!spinlock_trylock(&t->cpu->timeoutlock)) {
spinlock_unlock(&t->lock);
interrupts_restore(ipl);
interrupts_restore(ipl);
DEADLOCK_PROBE(p_tolock, DEADLOCK_THRESHOLD);
goto grab_locks;
}
/branches/fs/kernel/generic/src/time/clock.c
41,7 → 41,6
#include <time/clock.h>
#include <time/timeout.h>
#include <arch/types.h>
#include <config.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
57,16 → 56,12
#include <mm/frame.h>
#include <ddi/ddi.h>
 
/** Physical memory area of the real time clock. */
/* Pointer to variable with uptime */
uptime_t *uptime;
 
/** Physical memory area of the real time clock */
static parea_t clock_parea;
 
/* Pointers to public variables with time */
struct ptime {
unative_t seconds1;
unative_t useconds;
unative_t seconds2;
};
struct ptime *public_time;
/* Variable holding the fraction of a second, so that
* seconds are updated correctly
*/
86,15 → 81,14
if (!faddr)
panic("Cannot allocate page for clock");
public_time = (struct ptime *) PA2KA(faddr);
uptime = (uptime_t *) PA2KA(faddr);
uptime->seconds1 = 0;
uptime->seconds2 = 0;
uptime->useconds = 0;
 
/* TODO: We would need some arch dependent settings here */
public_time->seconds1 = 0;
public_time->seconds2 = 0;
public_time->useconds = 0;
 
clock_parea.pbase = (uintptr_t) faddr;
clock_parea.vbase = (uintptr_t) public_time;
clock_parea.vbase = (uintptr_t) uptime;
clock_parea.frames = 1;
clock_parea.cacheable = true;
ddi_parea_register(&clock_parea);
104,8 → 98,6
* physmem_map() the clock_parea.
*/
sysinfo_set_item_val("clock.cacheable", NULL, (unative_t) true);
sysinfo_set_item_val("clock.fcolor", NULL, (unative_t)
PAGE_COLOR(clock_parea.vbase));
sysinfo_set_item_val("clock.faddr", NULL, (unative_t) faddr);
}
 
118,16 → 110,16
static void clock_update_counters(void)
{
if (CPU->id == 0) {
secfrag += 1000000/HZ;
secfrag += 1000000 / HZ;
if (secfrag >= 1000000) {
secfrag -= 1000000;
public_time->seconds1++;
uptime->seconds1++;
write_barrier();
public_time->useconds = secfrag;
uptime->useconds = secfrag;
write_barrier();
public_time->seconds2 = public_time->seconds1;
uptime->seconds2 = uptime->seconds1;
} else
public_time->useconds += 1000000/HZ;
uptime->useconds += 1000000 / HZ;
}
}
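The seconds1/useconds/seconds2 triple with write barriers forms a small seqlock-style protocol: a reader accepts a sample only when both seconds fields agree. A consumer of the mapped page would read it along these lines (a sketch; the userspace reader is not part of this diff, and read_barrier() is assumed as the counterpart of the write_barrier() calls above):

unative_t secs, usecs;
do {
secs = uptime->seconds1;
read_barrier();
usecs = uptime->useconds;
read_barrier();
} while (secs != uptime->seconds2); /* writer raced us; sample again */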
 
/branches/fs/kernel/generic/src/ddi/ddi.c
99,8 → 99,7
* @return 0 on success, EPERM if the caller lacks capabilities to use this
* syscall, ENOENT if there is no task matching the specified ID or the
* physical address space is not enabled for mapping and ENOMEM if there
* was a problem in creating address space area. ENOTSUP is returned when
* an attempt to create an illegal address alias is detected.
* was a problem in creating address space area.
*/
static int ddi_physmem_map(uintptr_t pf, uintptr_t vp, count_t pages, int flags)
{
139,18 → 138,6
interrupts_restore(ipl);
return ENOENT;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (PAGE_COLOR(parea->vbase) != PAGE_COLOR(vp)) {
/*
* Refuse to create an illegal address alias.
*/
spinlock_unlock(&parea_lock);
interrupts_restore(ipl);
return ENOTSUP;
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
spinlock_unlock(&parea_lock);
 
spinlock_lock(&TASK->lock);
/branches/fs/kernel/generic/src/ddi/irq.c
138,6 → 138,7
{
link_initialize(&irq->link);
spinlock_initialize(&irq->lock, "irq.lock");
irq->preack = false;
irq->inr = -1;
irq->devno = -1;
irq->trigger = (irq_trigger_t) 0;
/branches/fs/kernel/generic/src/printf/snprintf.c
50,4 → 50,3
 
/** @}
*/
 
/branches/fs/kernel/generic/src/printf/vprintf.c
35,7 → 35,11
#include <print.h>
#include <printf/printf_core.h>
#include <putchar.h>
#include <synch/spinlock.h>
#include <arch/asm.h>
 
SPINLOCK_INITIALIZE(printf_lock); /**< vprintf spinlock */
 
static int vprintf_write(const char *str, size_t count, void *unused)
{
size_t i;
55,8 → 59,16
int vprintf(const char *fmt, va_list ap)
{
struct printf_spec ps = {(int(*)(void *, size_t, void *)) vprintf_write, NULL};
return printf_core(fmt, &ps, ap);
 
int irqpri = interrupts_disable();
spinlock_lock(&printf_lock);
int ret = printf_core(fmt, &ps, ap);
spinlock_unlock(&printf_lock);
interrupts_restore(irqpri);
return ret;
}
 
/** @}
/branches/fs/kernel/generic/src/printf/vsnprintf.c
42,8 → 42,6
char *string; /* destination string */
};
 
int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data);
 
/** Write string to given buffer.
* Write at most data->size characters including trailing zero. According to C99, snprintf() has to return number
* of characters that would have been written if enough space had been available. Hence the return value is not
54,7 → 52,7
* @param data structure with destination string, counter of used space and total string size.
* @return number of characters to print (not characters really printed!)
*/
int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data)
static int vsnprintf_write(const char *str, size_t count, struct vsnprintf_data *data)
{
size_t i;
i = data->size - data->len;
/branches/fs/kernel/generic/src/printf/printf_core.c
38,14 → 38,9
#include <printf/printf_core.h>
#include <putchar.h>
#include <print.h>
#include <synch/spinlock.h>
#include <arch/arg.h>
#include <arch/asm.h>
 
#include <arch.h>
 
SPINLOCK_INITIALIZE(printflock); /**< printf spinlock */
 
#define __PRINTF_FLAG_PREFIX 0x00000001 /**< show prefixes 0x or 0*/
#define __PRINTF_FLAG_SIGNED 0x00000002 /**< signed / unsigned number */
#define __PRINTF_FLAG_ZEROPADDED 0x00000004 /**< print leading zeroes */
458,7 → 453,6
*/
int printf_core(const char *fmt, struct printf_spec *ps, va_list ap)
{
int irqpri;
int i = 0, j = 0; /**< i is the index of the currently processed char from fmt, j is the index of the first unprinted non-formatting character */
int end;
int counter; /**< counter of printed characters */
472,10 → 466,7
uint64_t flags;
counter = 0;
irqpri = interrupts_disable();
spinlock_lock(&printflock);
 
while ((c = fmt[i])) {
/* control character */
if (c == '%' ) {
712,8 → 703,6
}
 
out:
spinlock_unlock(&printflock);
interrupts_restore(irqpri);
return counter;
}
/branches/fs/kernel/generic/src/printf/vsprintf.c
36,7 → 36,7
 
int vsprintf(char *str, const char *fmt, va_list ap)
{
return vsnprintf(str, (size_t)-1, fmt, ap);
return vsnprintf(str, (size_t) - 1, fmt, ap);
}
 
/** @}
/branches/fs/kernel/generic/src/console/cmd.c
48,6 → 48,7
#include <arch/types.h>
#include <adt/list.h>
#include <arch.h>
#include <config.h>
#include <func.h>
#include <macros.h>
#include <debug.h>
79,10 → 80,26
 
static cmd_info_t exit_info = {
.name = "exit",
.description = "Exit kconsole",
.description = "Exit kconsole.",
.argc = 0
};
 
static int cmd_reboot(cmd_arg_t *argv);
static cmd_info_t reboot_info = {
.name = "reboot",
.description = "Reboot.",
.func = cmd_reboot,
.argc = 0
};
 
static int cmd_uptime(cmd_arg_t *argv);
static cmd_info_t uptime_info = {
.name = "uptime",
.description = "Print uptime information.",
.func = cmd_uptime,
.argc = 0
};
 
static int cmd_continue(cmd_arg_t *argv);
static cmd_info_t continue_info = {
.name = "continue",
192,10 → 209,10
};
 
/* Data and methods for 'call0' command. */
static char call0_buf[MAX_CMDLINE+1];
static char carg1_buf[MAX_CMDLINE+1];
static char carg2_buf[MAX_CMDLINE+1];
static char carg3_buf[MAX_CMDLINE+1];
static char call0_buf[MAX_CMDLINE + 1];
static char carg1_buf[MAX_CMDLINE + 1];
static char carg2_buf[MAX_CMDLINE + 1];
static char carg3_buf[MAX_CMDLINE + 1];
 
static int cmd_call0(cmd_arg_t *argv);
static cmd_arg_t call0_argv = {
211,6 → 228,21
.argv = &call0_argv
};
 
/* Data and methods for 'mcall0' command. */
static int cmd_mcall0(cmd_arg_t *argv);
static cmd_arg_t mcall0_argv = {
.type = ARG_TYPE_STRING,
.buffer = call0_buf,
.len = sizeof(call0_buf)
};
static cmd_info_t mcall0_info = {
.name = "mcall0",
.description = "mcall0 <function> -> call function() on each CPU.",
.func = cmd_mcall0,
.argc = 1,
.argv = &mcall0_argv
};
 
/* Data and methods for 'call1' command. */
static int cmd_call1(cmd_arg_t *argv);
static cmd_arg_t call1_argv[] = {
406,6 → 438,7
 
static cmd_info_t *basic_commands[] = {
&call0_info,
&mcall0_info,
&call1_info,
&call2_info,
&call3_info,
413,6 → 446,8
&cpus_info,
&desc_info,
&exit_info,
&reboot_info,
&uptime_info,
&halt_info,
&help_info,
&ipc_task_info,
488,6 → 523,41
return 1;
}
 
 
/** Reboot the system.
*
* @param argv Argument vector.
*
* @return 0 on failure, 1 on success.
*/
int cmd_reboot(cmd_arg_t *argv)
{
reboot();
/* Not reached */
return 1;
}
 
 
/** Print system uptime information.
*
* @param argv Argument vector.
*
* @return 0 on failure, 1 on success.
*/
int cmd_uptime(cmd_arg_t *argv)
{
ASSERT(uptime);
/* This doesn't have to be very accurate */
unative_t sec = uptime->seconds1;
printf("Up %u days, %u hours, %u minutes, %u seconds\n",
sec / 86400, (sec % 86400) / 3600, (sec % 3600) / 60, sec % 60);
return 1;
}
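For example, with sec = 90061 the command prints "Up 1 days, 1 hours, 1 minutes, 1 seconds" (90061 = 1*86400 + 1*3600 + 1*60 + 1).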
 
/** Describe specified command.
*
* @param argv Argument vector.
540,7 → 610,7
struct {
unative_t f;
unative_t gp;
}fptr;
} fptr;
#endif
 
symaddr = get_symbol_addr((char *) argv->buffer);
551,7 → 621,7
printf("Duplicate symbol, be more specific.\n");
} else {
symbol = get_symtab_entry(symaddr);
printf("Calling f(): %.*p: %s\n", sizeof(uintptr_t) * 2, symaddr, symbol);
printf("Calling %s() (%.*p)\n", symbol, sizeof(uintptr_t) * 2, symaddr);
#ifdef ia64
fptr.f = symaddr;
fptr.gp = ((unative_t *)cmd_call2)[1];
565,6 → 635,35
return 1;
}
 
/** Call function with zero parameters on each CPU */
int cmd_mcall0(cmd_arg_t *argv)
{
/*
* For each CPU, create a thread which will
* call the function.
*/
count_t i;
for (i = 0; i < config.cpu_count; i++) {
if (!cpus[i].active)
continue;
thread_t *t;
if ((t = thread_create((void (*)(void *)) cmd_call0, (void *) argv, TASK, THREAD_FLAG_WIRED, "call0", false))) {
spinlock_lock(&t->lock);
t->cpu = &cpus[i];
spinlock_unlock(&t->lock);
printf("cpu%u: ", i);
thread_ready(t);
thread_join(t);
thread_detach(t);
} else
printf("Unable to create thread for cpu%u\n", i);
}
return 1;
}
 
/** Call function with one parameter */
int cmd_call1(cmd_arg_t *argv)
{
713,7 → 812,7
/** Write 4 byte value to address */
int cmd_set4(cmd_arg_t *argv)
{
uint32_t *addr ;
uint32_t *addr;
uint32_t arg1 = argv[1].intval;
bool pointer = false;
 
/branches/fs/kernel/generic/src/console/klog.c
90,8 → 90,6
ddi_parea_register(&klog_parea);
 
sysinfo_set_item_val("klog.faddr", NULL, (unative_t) faddr);
sysinfo_set_item_val("klog.fcolor", NULL, (unative_t)
PAGE_COLOR((uintptr_t) klog));
sysinfo_set_item_val("klog.pages", NULL, 1 << KLOG_ORDER);
sysinfo_set_item_val("klog.devno", NULL, devno);
sysinfo_set_item_val("klog.inr", NULL, KLOG_VIRT_INR);
/branches/fs/kernel/generic/src/proc/scheduler.c
207,7 → 207,7
 
interrupts_disable();
for (i = 0; i<RQ_COUNT; i++) {
for (i = 0; i < RQ_COUNT; i++) {
r = &CPU->rq[i];
spinlock_lock(&r->lock);
if (r->n == 0) {
377,7 → 377,8
void scheduler_separated_stack(void)
{
int priority;
DEADLOCK_PROBE_INIT(p_joinwq);
 
ASSERT(CPU != NULL);
if (THREAD) {
406,9 → 407,12
spinlock_unlock(&THREAD->lock);
delay(10);
spinlock_lock(&THREAD->lock);
DEADLOCK_PROBE(p_joinwq,
DEADLOCK_THRESHOLD);
goto repeat;
}
_waitq_wakeup_unsafe(&THREAD->join_wq, false);
_waitq_wakeup_unsafe(&THREAD->join_wq,
WAKEUP_FIRST);
spinlock_unlock(&THREAD->join_wq.lock);
THREAD->state = Undead;
447,8 → 451,8
/*
* Entering state is unexpected.
*/
panic("tid%d: unexpected state %s\n", THREAD->tid,
thread_states[THREAD->state]);
panic("tid%llu: unexpected state %s\n", THREAD->tid,
thread_states[THREAD->state]);
break;
}
 
500,7 → 504,7
THREAD->state = Running;
 
#ifdef SCHEDULER_VERBOSE
printf("cpu%d: tid %d (priority=%d, ticks=%lld, nrdy=%ld)\n",
printf("cpu%d: tid %llu (priority=%d, ticks=%llu, nrdy=%ld)\n",
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks,
atomic_get(&CPU->nrdy));
#endif
568,7 → 572,7
* Searching least priority queues on all CPU's first and most priority
* queues on all CPU's last.
*/
for (j= RQ_COUNT - 1; j >= 0; j--) {
for (j = RQ_COUNT - 1; j >= 0; j--) {
for (i = 0; i < config.cpu_active; i++) {
link_t *l;
runq_t *r;
609,8 → 613,8
*/
spinlock_lock(&t->lock);
if ((!(t->flags & (THREAD_FLAG_WIRED |
THREAD_FLAG_STOLEN))) &&
(!(t->fpu_context_engaged)) ) {
THREAD_FLAG_STOLEN))) &&
(!(t->fpu_context_engaged))) {
/*
* Remove t from r.
*/
636,7 → 640,7
*/
spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, "
printf("kcpulb%d: TID %llu -> cpu%d, nrdy=%ld, "
"avg=%nd\n", CPU->id, t->tid, CPU->id,
atomic_get(&CPU->nrdy),
atomic_get(&nrdy) / config.cpu_active);
719,7 → 723,7
for (cur = r->rq_head.next; cur != &r->rq_head;
cur = cur->next) {
t = list_get_instance(cur, thread_t, rq_link);
printf("%d(%s) ", t->tid,
printf("%llu(%s) ", t->tid,
thread_states[t->state]);
}
printf("\n");
/branches/fs/kernel/generic/src/proc/task.c
41,6 → 41,7
#include <proc/uarg.h>
#include <mm/as.h>
#include <mm/slab.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch.h>
92,6 → 93,49
btree_create(&tasks_btree);
}
 
/** Kill all tasks except the current task.
*
*/
void task_done(void)
{
task_t *t;
do { /* Repeat until there are no tasks other than TASK */
/* Messing with task structures, avoid deadlock */
ipl_t ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
t = NULL;
link_t *cur;
for (cur = tasks_btree.leaf_head.next; cur != &tasks_btree.leaf_head; cur = cur->next) {
btree_node_t *node = list_get_instance(cur, btree_node_t, leaf_link);
unsigned int i;
for (i = 0; i < node->keys; i++) {
if ((task_t *) node->value[i] != TASK) {
t = (task_t *) node->value[i];
break;
}
}
}
if (t != NULL) {
task_id_t id = t->taskid;
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
#ifdef CONFIG_DEBUG
printf("Killing task %llu\n", id);
#endif
task_kill(id);
} else {
spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);
}
} while (t != NULL);
}
 
/** Create new task
*
140,11 → 184,8
 
/*
* Increment address space reference count.
* TODO: Reconsider the locking scheme.
*/
mutex_lock(&as->lock);
as->refcount++;
mutex_unlock(&as->lock);
atomic_inc(&as->refcount);
 
spinlock_lock(&tasks_lock);
 
166,15 → 207,8
task_destroy_arch(t);
btree_destroy(&t->futexes);
 
mutex_lock_active(&t->as->lock);
if (--t->as->refcount == 0) {
mutex_unlock(&t->as->lock);
if (atomic_predec(&t->as->refcount) == 0)
as_destroy(t->as);
/*
* t->as is destroyed.
*/
} else
mutex_unlock(&t->as->lock);
free(t);
TASK = NULL;
382,7 → 416,7
link_t *cur;
ipl_t ipl;
/* Messing with thread structures, avoid deadlock */
/* Messing with task structures, avoid deadlock */
ipl = interrupts_disable();
spinlock_lock(&tasks_lock);
408,7 → 442,7
char suffix;
order(task_get_accounting(t), &cycles, &suffix);
printf("%-6lld %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
printf("%-6llu %-10s %-3ld %#10zx %#10zx %9llu%c %7zd "
"%6zd", t->taskid, t->name, t->context, t, t->as,
cycles, suffix, t->refcount,
atomic_get(&t->active_calls));
495,7 → 529,7
 
ipc_cleanup();
futex_cleanup();
klog_printf("Cleanup of task %lld completed.", TASK->taskid);
klog_printf("Cleanup of task %llu completed.", TASK->taskid);
}
 
/** Kernel thread used to kill the userspace task when its main thread exits.
/branches/fs/kernel/generic/src/proc/thread.c
94,7 → 94,7
btree_t threads_btree;
 
SPINLOCK_INITIALIZE(tidlock);
uint32_t last_tid = 0;
thread_id_t last_tid = 0;
 
static slab_cache_t *thread_slab;
#ifdef ARCH_HAS_FPU
238,6 → 238,7
cpu = CPU;
if (t->flags & THREAD_FLAG_WIRED) {
ASSERT(t->cpu != NULL);
cpu = t->cpu;
}
t->state = Ready;
496,7 → 497,7
ipl_t ipl;
 
/*
* Since the thread is expected to not be already detached,
* Since the thread is expected not to be already detached,
* pointer to it must be still valid.
*/
ipl = interrupts_disable();
580,7 → 581,7
char suffix;
order(t->cycles, &cycles, &suffix);
printf("%-6zd %-10s %#10zx %-8s %#10zx %-3ld %#10zx "
printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx "
"%#10zx %9llu%c ", t->tid, t->name, t,
thread_states[t->state], t->task, t->task->context,
t->thread_code, t->kstack, cycles, suffix);
636,12 → 637,11
/** Process syscall to create new thread.
*
*/
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name, thread_id_t *uspace_thread_id)
{
thread_t *t;
char namebuf[THREAD_NAME_BUFLEN];
uspace_arg_t *kernel_uarg;
uint32_t tid;
int rc;
 
rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
658,12 → 658,14
t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf,
false);
if (t) {
tid = t->tid;
thread_ready(t);
return (unative_t) tid;
} else {
if (uspace_thread_id != NULL)
return (unative_t) copy_to_uspace(uspace_thread_id, &t->tid,
sizeof(t->tid));
else
return 0;
} else
free(kernel_uarg);
}
 
return (unative_t) ENOMEM;
}
678,6 → 680,22
return 0;
}
 
/** Syscall for getting TID.
*
* @param uspace_thread_id Userspace address of an 8-byte buffer in which to
* store the current thread ID.
*
* @return 0 on success or an error code from @ref errno.h.
*/
unative_t sys_thread_get_id(thread_id_t *uspace_thread_id)
{
/*
* No need to acquire lock on THREAD because tid
* remains constant for the lifespan of the thread.
*/
return (unative_t) copy_to_uspace(uspace_thread_id, &THREAD->tid,
sizeof(THREAD->tid));
}
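On the userspace side, a wrapper would pass the address of an 8-byte buffer; a hypothetical sketch (__SYSCALL1 and sysarg_t are assumed from the libc conventions and are not part of this hunk):

thread_id_t tid;
if (__SYSCALL1(SYS_THREAD_GET_ID, (sysarg_t) &tid) == 0)
printf("running as thread %llu\n", tid);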
 
/** @}
*/
 
/branches/fs/kernel/generic/src/lib/rd.c
49,7 → 49,7
 
int init_rd(rd_header * header, size_t size)
{
printf("Header magic %c%c%c%c\n",header->magic[0],header->magic[1],header->magic[2],header->magic[3]);
//printf("Header magic %c%c%c%c\n",header->magic[0],header->magic[1],header->magic[2],header->magic[3]);
 
/* Identify RAM disk */
if ((header->magic[0] != RD_MAG0) || (header->magic[1] != RD_MAG1) ||
56,14 → 56,14
(header->magic[2] != RD_MAG2) || (header->magic[3] != RD_MAG3))
return RE_INVALID;
 
printf("Header version %d\n",header->version);
printf("Expected version %d\n",RD_VERSION);
//printf("Header version %d\n",header->version);
//printf("Expected version %d\n",RD_VERSION);
/* Identify version */
if (header->version != RD_VERSION)
return RE_UNSUPPORTED;
 
printf("Header type %d\n",header->data_type);
//printf("Header type %d\n",header->data_type);
uint32_t hsize;
uint64_t dsize;
80,9 → 80,9
return RE_UNSUPPORTED;
}
 
printf("Header size %d\n",header->header_size);
printf("Data size %d\n",header->data_size);
printf("Size %d\n",size);
//printf("Header size %d\n",header->header_size);
//printf("Data size %d\n",header->data_size);
//printf("Size %d\n",size);
//jelen: does this make any sense? (we don't even care about the header size, do we?)
/*
98,7 → 98,6
if ((uint64_t) hsize + dsize > size)
dsize = size - hsize;
rd_parea.pbase = ALIGN_DOWN((uintptr_t)KA2PA((void *) header + hsize), FRAME_SIZE);
rd_parea.vbase = (uintptr_t) ((void *) header + hsize);
rd_parea.frames = SIZE2FRAMES(dsize);
111,8 → 110,6
sysinfo_set_item_val("rd.size", NULL, dsize);
sysinfo_set_item_val("rd.address.physical", NULL, (unative_t)
KA2PA((void *) header + hsize));
sysinfo_set_item_val("rd.address.color", NULL, (unative_t)
PAGE_COLOR((uintptr_t) header + hsize));
 
return RE_OK;
}
/branches/fs/kernel/generic/src/lib/memstr.c
130,7 → 130,8
{
char *orig = dest;
while ((*(dest++) = *(src++)));
while ((*(dest++) = *(src++)))
;
return orig;
}
 
/branches/fs/kernel/generic/src/lib/func.c
223,20 → 223,23
 
void order(const uint64_t val, uint64_t *rv, char *suffix)
{
if (val > 1000000000000000000LL) {
*rv = val / 1000000000000000LL;
if (val > 10000000000000000000ULL) {
*rv = val / 1000000000000000000ULL;
*suffix = 'Z';
} else if (val > 1000000000000000000ULL) {
*rv = val / 1000000000000000ULL;
*suffix = 'E';
} else if (val > 1000000000000000LL) {
*rv = val / 1000000000000LL;
} else if (val > 1000000000000000ULL) {
*rv = val / 1000000000000ULL;
*suffix = 'T';
} else if (val > 1000000000000LL) {
*rv = val / 1000000000LL;
} else if (val > 1000000000000ULL) {
*rv = val / 1000000000ULL;
*suffix = 'G';
} else if (val > 1000000000LL) {
*rv = val / 1000000LL;
} else if (val > 1000000000ULL) {
*rv = val / 1000000ULL;
*suffix = 'M';
} else if (val > 1000000LL) {
*rv = val / 1000LL;
} else if (val > 1000000ULL) {
*rv = val / 1000ULL;
*suffix = 'k';
} else {
*rv = val;
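A worked example under the new thresholds: order(2500000, &rv, &suffix) yields rv = 2500 and suffix = 'k', which the task and thread listings print as "2500k". The switch to ULL literals matters because the new top threshold, 10^19, does not fit in a signed 64-bit constant.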
/branches/fs/kernel/generic/src/lib/objc.c
49,7 → 49,7
return class_create_instance(self);
}
 
- (id) free
- (id) dispose
{
return object_dispose(self);
}
/branches/fs/kernel/generic/src/adt/btree.c
970,7 → 970,7
 
printf("(");
for (i = 0; i < node->keys; i++) {
printf("%lld%s", node->key[i], i < node->keys - 1 ? "," : "");
printf("%llu%s", node->key[i], i < node->keys - 1 ? "," : "");
if (node->depth && node->subtree[i]) {
list_append(&node->subtree[i]->bfs_link, &head);
}
992,7 → 992,7
 
printf("(");
for (i = 0; i < node->keys; i++)
printf("%lld%s", node->key[i], i < node->keys - 1 ? "," : "");
printf("%llu%s", node->key[i], i < node->keys - 1 ? "," : "");
printf(")");
}
printf("\n");
/branches/fs/kernel/generic/src/mm/tlb.c
78,7 → 78,8
* @param page Virtual page address, if required by type.
* @param count Number of pages, if required by type.
*/
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, uintptr_t page, count_t count)
void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
uintptr_t page, count_t count)
{
int i;
 
107,11 → 108,11
/*
* Enqueue the message.
*/
cpu->tlb_messages[cpu->tlb_messages_count].type = type;
cpu->tlb_messages[cpu->tlb_messages_count].asid = asid;
cpu->tlb_messages[cpu->tlb_messages_count].page = page;
cpu->tlb_messages[cpu->tlb_messages_count].count = count;
cpu->tlb_messages_count++;
index_t idx = cpu->tlb_messages_count++;
cpu->tlb_messages[idx].type = type;
cpu->tlb_messages[idx].asid = asid;
cpu->tlb_messages[idx].page = page;
cpu->tlb_messages[idx].count = count;
}
spinlock_unlock(&cpu->lock);
}
/branches/fs/kernel/generic/src/mm/backend_anon.c
72,11 → 72,13
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
*/
int anon_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
uintptr_t frame;
bool dirty = false;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
86,13 → 88,14
/*
* The area is shared, chances are that the mapping can be found
* in the pagemap of the address space area share info structure.
* in the pagemap of the address space area share info
* structure.
* In the case that the pagemap does not contain the respective
* mapping, a new frame is allocated and the mapping is created.
*/
mutex_lock(&area->sh_info->lock);
frame = (uintptr_t) btree_search(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
ALIGN_DOWN(addr, PAGE_SIZE) - area->base, &leaf);
if (!frame) {
bool allocate = true;
int i;
102,7 → 105,8
* Just a small workaround.
*/
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
if (leaf->key[i] ==
ALIGN_DOWN(addr, PAGE_SIZE)) {
allocate = false;
break;
}
110,11 → 114,15
if (allocate) {
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
/*
* Insert the address of the newly allocated frame to the pagemap.
* Insert the address of the newly allocated
* frame to the pagemap.
*/
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base, (void *) frame, leaf);
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
}
frame_reference_add(ADDR2PFN(frame));
137,12 → 145,13
*/
frame = (uintptr_t) frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
}
/*
* Map 'page' to 'frame'.
* Note that TLB shootdown is not attempted as only new information is being
* inserted into page tables.
* Note that TLB shootdown is not attempted as only new information is
* being inserted into page tables.
*/
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
162,9 → 171,6
void anon_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
 
/** Share the anonymous address space area.
184,7 → 190,8
* Copy used portions of the area to sh_info's page map.
*/
mutex_lock(&area->sh_info->lock);
for (cur = area->used_space.leaf_head.next; cur != &area->used_space.leaf_head; cur = cur->next) {
for (cur = area->used_space.leaf_head.next;
cur != &area->used_space.leaf_head; cur = cur->next) {
btree_node_t *node;
int i;
198,14 → 205,19
pte_t *pte;
page_table_lock(area->as, false);
pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
pte = page_mapping_find(area->as,
base + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap,
(base + j * PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
page_table_unlock(area->as, false);
frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
 
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
frame_reference_add(pfn);
}
 
}
}
mutex_unlock(&area->sh_info->lock);
/branches/fs/kernel/generic/src/mm/as.c
57,6 → 57,7
#include <genarch/mm/page_ht.h>
#include <mm/asid.h>
#include <arch/mm/asid.h>
#include <preemption.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
95,10 → 96,13
#endif
 
/**
* This lock protects inactive_as_with_asid_head list. It must be acquired
* before as_t mutex.
* This lock serializes access to the ASID subsystem.
* It protects:
* - inactive_as_with_asid_head list
* - as->asid for each as of the as_t type
* - asids_allocated counter
*/
SPINLOCK_INITIALIZE(inactive_as_with_asid_lock);
SPINLOCK_INITIALIZE(asidlock);
 
/**
* This list contains address spaces that are not active on any
178,7 → 182,7
else
as->asid = ASID_INVALID;
as->refcount = 0;
atomic_set(&as->refcount, 0);
as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
as->genarch.page_table = page_table_create(flags);
193,26 → 197,45
*
* When there are no tasks referencing this address space (i.e. its refcount is
* zero), the address space can be destroyed.
*
* We know that we don't hold any spinlock.
*/
void as_destroy(as_t *as)
{
ipl_t ipl;
bool cond;
DEADLOCK_PROBE_INIT(p_asidlock);
 
ASSERT(as->refcount == 0);
ASSERT(atomic_get(&as->refcount) == 0);
/*
* Since there is no reference to this area,
* it is safe not to lock its mutex.
*/
ipl = interrupts_disable();
spinlock_lock(&inactive_as_with_asid_lock);
 
/*
* We need to avoid deadlock between TLB shootdown and asidlock.
* We therefore try to take asidlock conditionally and if we don't succeed,
* we enable interrupts and try again. This is done while preemption is
* disabled to prevent nested context switches. We also depend on the
* fact that so far no spinlocks are held.
*/
preemption_disable();
ipl = interrupts_read();
retry:
interrupts_disable();
if (!spinlock_trylock(&asidlock)) {
interrupts_enable();
DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
goto retry;
}
preemption_enable(); /* Interrupts disabled, enable preemption */
if (as->asid != ASID_INVALID && as != AS_KERNEL) {
if (as != AS && as->cpu_refcount == 0)
list_remove(&as->inactive_as_with_asid_link);
asid_put(as->asid);
}
spinlock_unlock(&inactive_as_with_asid_lock);
spinlock_unlock(&asidlock);
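The conditional acquisition above is a reusable pattern; the same sequence reappears in as_switch() below. A minimal distilled sketch, using only the spinlock_trylock(), interrupts_disable()/interrupts_enable() and preemption_disable()/preemption_enable() primitives seen here (the helper name is hypothetical and the ipl save/restore is omitted for brevity):
	/*
	 * Hypothetical helper illustrating the asidlock acquisition pattern.
	 * Preemption stays disabled across the retry loop so that the brief
	 * interrupt-enabled window cannot lead to a nested context switch.
	 */
	static void asidlock_acquire(void)
	{
		preemption_disable();
	retry:
		interrupts_disable();
		if (!spinlock_trylock(&asidlock)) {
			/* Let pending TLB shootdown IPIs in, then retry. */
			interrupts_enable();
			goto retry;
		}
		/* Lock held, interrupts disabled; reenable preemption. */
		preemption_enable();
	}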
 
/*
* Destroy address space areas of the address space.
411,7 → 434,7
int i = 0;
if (overlaps(b, c * PAGE_SIZE, area->base,
pages*PAGE_SIZE)) {
pages * PAGE_SIZE)) {
if (b + c * PAGE_SIZE <= start_free) {
/*
468,15 → 491,16
/*
* Finish TLB shootdown sequence.
*/
 
tlb_invalidate_pages(as->asid, area->base + pages * PAGE_SIZE,
area->pages - pages);
tlb_shootdown_finalize();
/*
* Invalidate software translation caches (e.g. TSB on sparc64).
*/
as_invalidate_translation_cache(as, area->base +
pages * PAGE_SIZE, area->pages - pages);
tlb_shootdown_finalize();
} else {
/*
* Growing the area.
553,7 → 577,7
if (area->backend &&
area->backend->frame_free) {
area->backend->frame_free(area, b +
j * PAGE_SIZE, PTE_GET_FRAME(pte));
j * PAGE_SIZE, PTE_GET_FRAME(pte));
}
page_mapping_remove(as, b + j * PAGE_SIZE);
page_table_unlock(as, false);
564,14 → 588,14
/*
* Finish TLB shootdown sequence.
*/
 
tlb_invalidate_pages(as->asid, area->base, area->pages);
tlb_shootdown_finalize();
/*
* Invalidate potential software translation caches (e.g. TSB on
* sparc64).
*/
as_invalidate_translation_cache(as, area->base, area->pages);
tlb_shootdown_finalize();
btree_destroy(&area->used_space);
 
613,8 → 637,7
* such address space area, EPERM if there was a problem in accepting the area
* or ENOMEM if there was a problem in allocating destination address space
* area. ENOTSUP is returned if the address space area backend does not support
* sharing or if the kernel detects an attempt to create an illegal address
* alias.
* sharing.
*/
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask)
667,20 → 690,6
return EPERM;
}
 
#ifdef CONFIG_VIRT_IDX_DCACHE
if (!(dst_flags_mask & AS_AREA_EXEC)) {
if (PAGE_COLOR(src_area->base) != PAGE_COLOR(dst_base)) {
/*
* Refuse to create an illegal address alias.
*/
mutex_unlock(&src_area->lock);
mutex_unlock(&src_as->lock);
interrupts_restore(ipl);
return ENOTSUP;
}
}
#endif /* CONFIG_VIRT_IDX_DCACHE */
 
/*
* Now we are committed to sharing the area.
* First, prepare the area for sharing.
875,24 → 884,37
/** Switch address spaces.
*
* Note that this function cannot sleep as it is essentially a part of
* scheduling. Sleeping here would lead to deadlock on wakeup.
* scheduling. Sleeping here would lead to deadlock on wakeup. Another
* thing that is forbidden in this context is locking the address space.
*
* When this function is entered, no spinlocks may be held.
*
* @param old Old address space or NULL.
* @param new New address space.
*/
void as_switch(as_t *old_as, as_t *new_as)
{
ipl_t ipl;
bool needs_asid = false;
ipl = interrupts_disable();
spinlock_lock(&inactive_as_with_asid_lock);
DEADLOCK_PROBE_INIT(p_asidlock);
preemption_disable();
retry:
(void) interrupts_disable();
if (!spinlock_trylock(&asidlock)) {
/*
* Avoid deadlock with TLB shootdown.
* We can enable interrupts here because
* preemption is disabled. We should not be
* holding any other lock.
*/
(void) interrupts_enable();
DEADLOCK_PROBE(p_asidlock, DEADLOCK_THRESHOLD);
goto retry;
}
preemption_enable();
 
/*
* First, take care of the old address space.
*/
if (old_as) {
mutex_lock_active(&old_as->lock);
ASSERT(old_as->cpu_refcount);
if((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
/*
901,11 → 923,10
* list of inactive address spaces with assigned
* ASID.
*/
ASSERT(old_as->asid != ASID_INVALID);
list_append(&old_as->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
ASSERT(old_as->asid != ASID_INVALID);
list_append(&old_as->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
}
mutex_unlock(&old_as->lock);
 
/*
* Perform architecture-specific tasks when the address space
917,36 → 938,15
/*
* Second, prepare the new address space.
*/
mutex_lock_active(&new_as->lock);
if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
if (new_as->asid != ASID_INVALID) {
if (new_as->asid != ASID_INVALID)
list_remove(&new_as->inactive_as_with_asid_link);
} else {
/*
* Defer call to asid_get() until new_as->lock is released.
*/
needs_asid = true;
}
else
new_as->asid = asid_get();
}
#ifdef AS_PAGE_TABLE
SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif
mutex_unlock(&new_as->lock);
 
if (needs_asid) {
/*
* Allocation of new ASID was deferred
* until now in order to avoid deadlock.
*/
asid_t asid;
asid = asid_get();
mutex_lock_active(&new_as->lock);
new_as->asid = asid;
mutex_unlock(&new_as->lock);
}
spinlock_unlock(&inactive_as_with_asid_lock);
interrupts_restore(ipl);
/*
* Perform architecture-specific steps.
953,6 → 953,8
* (e.g. write ASID to hardware register etc.)
*/
as_install_arch(new_as);
 
spinlock_unlock(&asidlock);
AS = new_as;
}
/branches/fs/kernel/generic/src/mm/backend_phys.c
32,7 → 32,8
 
/**
* @file
* @brief Backend for address space areas backed by continuous physical memory.
* @brief Backend for address space areas backed by contiguous physical
* memory.
*/
 
#include <debug.h>
62,7 → 63,8
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
*/
int phys_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
72,7 → 74,8
return AS_PF_FAULT;
 
ASSERT(addr - area->base < area->backend_data.frames * FRAME_SIZE);
page_mapping_insert(AS, addr, base + (addr - area->base), as_area_get_flags(area));
page_mapping_insert(AS, addr, base + (addr - area->base),
as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
 
/branches/fs/kernel/generic/src/mm/frame.c
70,16 → 70,19
typedef struct {
count_t refcount; /**< tracking of shared frames */
uint8_t buddy_order; /**< buddy system block order */
link_t buddy_link; /**< link to the next free block inside one order */
link_t buddy_link; /**< link to the next free block inside one
order */
void *parent; /**< If allocated by slab, this points there */
} frame_t;
 
typedef struct {
SPINLOCK_DECLARE(lock); /**< this lock protects everything below */
pfn_t base; /**< frame_no of the first frame in the frames array */
pfn_t base; /**< frame_no of the first frame in the frames
array */
count_t count; /**< Size of zone */
 
frame_t *frames; /**< array of frame_t structures in this zone */
frame_t *frames; /**< array of frame_t structures in this
zone */
count_t free_count; /**< number of free frame_t structures */
count_t busy_count; /**< number of busy frame_t structures */
157,8 → 160,8
for (i = 0; i < zones.count; i++) {
/* Check for overflow */
z = zones.info[i];
if (overlaps(newzone->base,newzone->count,
z->base, z->count)) {
if (overlaps(newzone->base,newzone->count, z->base,
z->count)) {
printf("Zones overlap!\n");
return -1;
}
166,7 → 169,7
break;
}
/* Move other zones up */
for (j = i;j < zones.count; j++)
for (j = i; j < zones.count; j++)
zones.info[j + 1] = zones.info[j];
zones.info[i] = newzone;
zones.count++;
202,7 → 205,8
z = zones.info[i];
spinlock_lock(&z->lock);
if (z->base <= frame && z->base + z->count > frame) {
spinlock_unlock(&zones.lock); /* Unlock the global lock */
/* Unlock the global lock */
spinlock_unlock(&zones.lock);
if (pzone)
*pzone = i;
return z;
229,7 → 233,8
* Assume interrupts are disabled.
*
* @param order Size (2^order) of free space we are trying to find
* @param pzone Pointer to preferred zone or NULL, on return contains zone number
* @param pzone Pointer to preferred zone or NULL, on return contains zone
* number
*/
static zone_t * find_free_zone_and_lock(uint8_t order, unsigned int *pzone)
{
273,10 → 278,10
* @param order - Order of parent must be different than this parameter!
*/
static link_t *zone_buddy_find_block(buddy_system_t *b, link_t *child,
uint8_t order)
uint8_t order)
{
frame_t * frame;
zone_t * zone;
frame_t *frame;
zone_t *zone;
index_t index;
frame = list_get_instance(child, frame_t, buddy_link);
293,8 → 298,8
 
static void zone_buddy_print_id(buddy_system_t *b, link_t *block)
{
frame_t * frame;
zone_t * zone;
frame_t *frame;
zone_t *zone;
index_t index;
 
frame = list_get_instance(block, frame_t, buddy_link);
310,16 → 315,17
*
* @return Buddy for given block if found
*/
static link_t * zone_buddy_find_buddy(buddy_system_t *b, link_t * block)
static link_t *zone_buddy_find_buddy(buddy_system_t *b, link_t *block)
{
frame_t * frame;
zone_t * zone;
frame_t *frame;
zone_t *zone;
index_t index;
bool is_left, is_right;
 
frame = list_get_instance(block, frame_t, buddy_link);
zone = (zone_t *) b->data;
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame), frame->buddy_order));
ASSERT(IS_BUDDY_ORDER_OK(frame_index_abs(zone, frame),
frame->buddy_order));
is_left = IS_BUDDY_LEFT_BLOCK_ABS(zone, frame);
is_right = IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame);
348,8 → 354,8
*
* @return right block
*/
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t * block) {
frame_t * frame_l, * frame_r;
static link_t * zone_buddy_bisect(buddy_system_t *b, link_t *block) {
frame_t *frame_l, *frame_r;
 
frame_l = list_get_instance(block, frame_t, buddy_link);
frame_r = (frame_l + (1 << (frame_l->buddy_order - 1)));
365,8 → 371,8
*
* @return Coalesced block (actually block that represents lower address)
*/
static link_t * zone_buddy_coalesce(buddy_system_t *b, link_t * block_1,
link_t * block_2)
static link_t *zone_buddy_coalesce(buddy_system_t *b, link_t *block_1,
link_t *block_2)
{
frame_t *frame1, *frame2;
382,8 → 388,9
* @param block Buddy system block
* @param order Order to set
*/
static void zone_buddy_set_order(buddy_system_t *b, link_t * block, uint8_t order) {
frame_t * frame;
static void zone_buddy_set_order(buddy_system_t *b, link_t *block,
uint8_t order) {
frame_t *frame;
frame = list_get_instance(block, frame_t, buddy_link);
frame->buddy_order = order;
}
395,8 → 402,8
*
* @return Order of block
*/
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t * block) {
frame_t * frame;
static uint8_t zone_buddy_get_order(buddy_system_t *b, link_t *block) {
frame_t *frame;
frame = list_get_instance(block, frame_t, buddy_link);
return frame->buddy_order;
}
420,8 → 427,8
* @param block Buddy system block
*
*/
static void zone_buddy_mark_available(buddy_system_t *b, link_t * block) {
frame_t * frame;
static void zone_buddy_mark_available(buddy_system_t *b, link_t *block) {
frame_t *frame;
frame = list_get_instance(block, frame_t, buddy_link);
frame->refcount = 0;
}
520,8 → 527,8
frame = zone_get_frame(zone, frame_idx);
if (frame->refcount)
return;
link = buddy_system_alloc_block(zone->buddy_system,
&frame->buddy_link);
link = buddy_system_alloc_block(zone->buddy_system,
&frame->buddy_link);
ASSERT(link);
zone->free_count--;
}
545,12 → 552,12
pfn_t frame_idx;
frame_t *frame;
 
ASSERT(!overlaps(z1->base,z1->count,z2->base,z2->count));
ASSERT(!overlaps(z1->base, z1->count, z2->base, z2->count));
ASSERT(z1->base < z2->base);
 
spinlock_initialize(&z->lock, "zone_lock");
z->base = z1->base;
z->count = z2->base+z2->count - z1->base;
z->count = z2->base + z2->count - z1->base;
z->flags = z1->flags & z2->flags;
 
z->free_count = z1->free_count + z2->free_count;
558,12 → 565,12
max_order = fnzb(z->count);
 
z->buddy_system = (buddy_system_t *)&z[1];
buddy_system_create(z->buddy_system, max_order,
&zone_buddy_system_operations,
(void *) z);
z->buddy_system = (buddy_system_t *) &z[1];
buddy_system_create(z->buddy_system, max_order,
&zone_buddy_system_operations, (void *) z);
 
z->frames = (frame_t *)((uint8_t *) z->buddy_system + buddy_conf_size(max_order));
z->frames = (frame_t *)((uint8_t *) z->buddy_system +
buddy_conf_size(max_order));
for (i = 0; i < z->count; i++) {
/* This marks all frames busy */
frame_initialize(&z->frames[i]);
603,7 → 610,7
}
while (zone_can_alloc(z2, 0)) {
frame_idx = zone_frame_alloc(z2, 0);
frame = &z->frames[frame_idx + (z2->base-z1->base)];
frame = &z->frames[frame_idx + (z2->base - z1->base)];
frame->refcount = 0;
buddy_system_free(z->buddy_system, &frame->buddy_link);
}
668,7 → 675,7
for (i = 0; i < (count_t) (1 << order); i++) {
frame = &zone->frames[i + frame_idx];
frame->buddy_order = 0;
if (! frame->refcount)
if (!frame->refcount)
frame->refcount = 1;
ASSERT(frame->refcount == 1);
}
710,7 → 717,8
spinlock_lock(&zone1->lock);
spinlock_lock(&zone2->lock);
 
cframes = SIZE2FRAMES(zone_conf_size(zone2->base+zone2->count-zone1->base));
cframes = SIZE2FRAMES(zone_conf_size(zone2->base + zone2->count -
zone1->base));
if (cframes == 1)
order = 0;
else
803,7 → 811,8
/* Allocate frames _after_ the conframe */
/* Check sizes */
z->frames = (frame_t *)((uint8_t *) z->buddy_system + buddy_conf_size(max_order));
z->frames = (frame_t *)((uint8_t *) z->buddy_system +
buddy_conf_size(max_order));
for (i = 0; i < count; i++) {
frame_initialize(&z->frames[i]);
}
865,16 → 874,20
if (confframe >= start && confframe < start+count) {
for (;confframe < start + count; confframe++) {
addr = PFN2ADDR(confframe);
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.base), config.kernel_size))
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(config.base), config.kernel_size))
continue;
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(config.stack_base), config.stack_size))
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(config.stack_base), config.stack_size))
continue;
bool overlap = false;
count_t i;
for (i = 0; i < init.cnt; i++)
if (overlaps(addr, PFN2ADDR(confcount), KA2PA(init.tasks[i].addr), init.tasks[i].size)) {
if (overlaps(addr, PFN2ADDR(confcount),
KA2PA(init.tasks[i].addr),
init.tasks[i].size)) {
overlap = true;
break;
}
915,7 → 928,7
spinlock_unlock(&zone->lock);
}
 
void * frame_get_parent(pfn_t pfn, unsigned int hint)
void *frame_get_parent(pfn_t pfn, unsigned int hint)
{
zone_t *zone = find_zone_and_lock(pfn, &hint);
void *res;
1073,15 → 1086,21
/* Tell the architecture to create some memory */
frame_arch_init();
if (config.cpu_active == 1) {
frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)), SIZE2FRAMES(config.kernel_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)), SIZE2FRAMES(config.stack_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.base)),
SIZE2FRAMES(config.kernel_size));
frame_mark_unavailable(ADDR2PFN(KA2PA(config.stack_base)),
SIZE2FRAMES(config.stack_size));
count_t i;
for (i = 0; i < init.cnt; i++)
frame_mark_unavailable(ADDR2PFN(KA2PA(init.tasks[i].addr)), SIZE2FRAMES(init.tasks[i].size));
for (i = 0; i < init.cnt; i++) {
pfn_t pfn = ADDR2PFN(KA2PA(init.tasks[i].addr));
frame_mark_unavailable(pfn,
SIZE2FRAMES(init.tasks[i].size));
}
 
if (ballocs.size)
frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)), SIZE2FRAMES(ballocs.size));
frame_mark_unavailable(ADDR2PFN(KA2PA(ballocs.base)),
SIZE2FRAMES(ballocs.size));
 
/* Black list first frame, as allocating NULL would
* fail in some places */
1106,7 → 1125,8
for (i = 0; i < zones.count; i++) {
zone = zones.info[i];
spinlock_lock(&zone->lock);
printf("%-2d %12p %12zd %12zd\n", i, PFN2ADDR(zone->base), zone->free_count, zone->busy_count);
printf("%-2d %12p %12zd %12zd\n", i, PFN2ADDR(zone->base),
zone->free_count, zone->busy_count);
spinlock_unlock(&zone->lock);
}
spinlock_unlock(&zones.lock);
1138,10 → 1158,14
spinlock_lock(&zone->lock);
printf("Memory zone information\n");
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2, PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zdK)\n", zone->count, ((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count, (zone->busy_count * FRAME_SIZE) >> 10);
printf("Available space: %zd frames (%zdK)\n", zone->free_count, (zone->free_count * FRAME_SIZE) >> 10);
printf("Zone base address: %#.*p\n", sizeof(uintptr_t) * 2,
PFN2ADDR(zone->base));
printf("Zone size: %zd frames (%zdK)\n", zone->count,
((zone->count) * FRAME_SIZE) >> 10);
printf("Allocated space: %zd frames (%zdK)\n", zone->busy_count,
(zone->busy_count * FRAME_SIZE) >> 10);
printf("Available space: %zd frames (%zdK)\n", zone->free_count,
(zone->free_count * FRAME_SIZE) >> 10);
buddy_system_structure_print(zone->buddy_system, FRAME_SIZE);
spinlock_unlock(&zone->lock);
1152,3 → 1176,4
 
/** @}
*/
 
/branches/fs/kernel/generic/src/mm/page.c
76,7 → 76,7
cnt = length / PAGE_SIZE + (length % PAGE_SIZE > 0);
 
for (i = 0; i < cnt; i++)
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE);
page_mapping_insert(AS_KERNEL, s + i * PAGE_SIZE, s + i * PAGE_SIZE, PAGE_NOT_CACHEABLE | PAGE_WRITE);
 
}
 
/branches/fs/kernel/generic/src/mm/backend_elf.c
71,7 → 71,8
* @param addr Faulting virtual address.
* @param access Access mode that caused the fault (i.e. read/write/exec).
*
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e. serviced).
* @return AS_PF_FAULT on failure (i.e. page fault) or AS_PF_OK on success (i.e.
* serviced).
*/
int elf_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
80,11 → 81,13
btree_node_t *leaf;
uintptr_t base, frame;
index_t i;
bool dirty = false;
 
if (!as_area_check_access(area, access))
return AS_PF_FAULT;
 
ASSERT((addr >= entry->p_vaddr) && (addr < entry->p_vaddr + entry->p_memsz));
ASSERT((addr >= entry->p_vaddr) &&
(addr < entry->p_vaddr + entry->p_memsz));
i = (addr - entry->p_vaddr) >> PAGE_WIDTH;
base = (uintptr_t) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
107,7 → 110,8
*/
 
for (i = 0; i < leaf->keys; i++) {
if (leaf->key[i] == ALIGN_DOWN(addr, PAGE_SIZE)) {
if (leaf->key[i] ==
ALIGN_DOWN(addr, PAGE_SIZE)) {
found = true;
break;
}
115,8 → 119,10
}
if (frame || found) {
frame_reference_add(ADDR2PFN(frame));
page_mapping_insert(AS, addr, frame, as_area_get_flags(area));
if (!used_space_insert(area, ALIGN_DOWN(addr, PAGE_SIZE), 1))
page_mapping_insert(AS, addr, frame,
as_area_get_flags(area));
if (!used_space_insert(area,
ALIGN_DOWN(addr, PAGE_SIZE), 1))
panic("Could not insert used space.\n");
mutex_unlock(&area->sh_info->lock);
return AS_PF_OK;
124,10 → 130,12
}
/*
* The area is either not shared or the pagemap does not contain the mapping.
* The area is either not shared or the pagemap does not contain the
* mapping.
*/
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE < entry->p_vaddr + entry->p_filesz) {
if (ALIGN_DOWN(addr, PAGE_SIZE) + PAGE_SIZE <
entry->p_vaddr + entry->p_filesz) {
/*
* Initialized portion of the segment. The memory is backed
* directly by the content of the ELF image. Pages are
138,18 → 146,22
*/
if (entry->p_flags & PF_W) {
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), FRAME_SIZE);
memcpy((void *) PA2KA(frame),
(void *) (base + i * FRAME_SIZE), FRAME_SIZE);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
} else {
frame = KA2PA(base + i*FRAME_SIZE);
}
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >= ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
} else if (ALIGN_DOWN(addr, PAGE_SIZE) >=
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
/*
* This is the uninitialized portion of the segment.
* It is not physically present in the ELF image.
158,11 → 170,13
*/
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame), FRAME_SIZE, 0);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
} else {
175,12 → 189,15
size = entry->p_filesz - (i<<PAGE_WIDTH);
frame = (uintptr_t)frame_alloc(ONE_FRAME, 0);
memsetb(PA2KA(frame) + size, FRAME_SIZE - size, 0);
memcpy((void *) PA2KA(frame), (void *) (base + i*FRAME_SIZE), size);
memcpy((void *) PA2KA(frame), (void *) (base + i * FRAME_SIZE),
size);
dirty = true;
 
if (area->sh_info) {
frame_reference_add(ADDR2PFN(frame));
btree_insert(&area->sh_info->pagemap, ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
btree_insert(&area->sh_info->pagemap,
ALIGN_DOWN(addr, PAGE_SIZE) - area->base,
(void *) frame, leaf);
}
 
}
211,31 → 228,28
uintptr_t base;
index_t i;
ASSERT((page >= entry->p_vaddr) && (page < entry->p_vaddr + entry->p_memsz));
ASSERT((page >= entry->p_vaddr) &&
(page < entry->p_vaddr + entry->p_memsz));
i = (page - entry->p_vaddr) >> PAGE_WIDTH;
base = (uintptr_t) (((void *) elf) + entry->p_offset);
ASSERT(ALIGN_UP(base, FRAME_SIZE) == base);
if (page + PAGE_SIZE < ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (page + PAGE_SIZE <
ALIGN_UP(entry->p_vaddr + entry->p_filesz, PAGE_SIZE)) {
if (entry->p_flags & PF_W) {
/*
* Free the frame with the copy of writable segment data.
* Free the frame with the copy of writable segment
* data.
*/
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
}
} else {
/*
* The frame is either anonymous memory or the mixed case (i.e. lower
* part is backed by the ELF image and the upper is anonymous).
* In any case, a frame needs to be freed.
*/
frame_free(frame);
#ifdef CONFIG_VIRT_IDX_DCACHE
dcache_flush_frame(page, frame);
#endif
* The frame is either anonymous memory or the mixed case (i.e.
* lower part is backed by the ELF image and the upper is
* anonymous). In any case, a frame needs to be freed.
*/
frame_free(frame);
}
}
 
260,10 → 274,12
* Find the node in which to start linear search.
*/
if (area->flags & AS_AREA_WRITE) {
node = list_get_instance(area->used_space.leaf_head.next, btree_node_t, leaf_link);
node = list_get_instance(area->used_space.leaf_head.next,
btree_node_t, leaf_link);
} else {
(void) btree_search(&area->sh_info->pagemap, start_anon, &leaf);
node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap, leaf);
node = btree_leaf_node_left_neighbour(&area->sh_info->pagemap,
leaf);
if (!node)
node = leaf;
}
272,7 → 288,8
* Copy used anonymous portions of the area to sh_info's page map.
*/
mutex_lock(&area->sh_info->lock);
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head; cur = cur->next) {
for (cur = &node->leaf_link; cur != &area->used_space.leaf_head;
cur = cur->next) {
int i;
node = list_get_instance(cur, btree_node_t, leaf_link);
294,19 → 311,26
pte_t *pte;
/*
* Skip read-only pages that are backed by the ELF image.
* Skip read-only pages that are backed by the
* ELF image.
*/
if (!(area->flags & AS_AREA_WRITE))
if (base + (j + 1)*PAGE_SIZE <= start_anon)
if (base + (j + 1) * PAGE_SIZE <=
start_anon)
continue;
page_table_lock(area->as, false);
pte = page_mapping_find(area->as, base + j*PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) && PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap, (base + j*PAGE_SIZE) - area->base,
pte = page_mapping_find(area->as,
base + j * PAGE_SIZE);
ASSERT(pte && PTE_VALID(pte) &&
PTE_PRESENT(pte));
btree_insert(&area->sh_info->pagemap,
(base + j * PAGE_SIZE) - area->base,
(void *) PTE_GET_FRAME(pte), NULL);
page_table_unlock(area->as, false);
frame_reference_add(ADDR2PFN(PTE_GET_FRAME(pte)));
 
pfn_t pfn = ADDR2PFN(PTE_GET_FRAME(pte));
frame_reference_add(pfn);
}
}
/branches/fs/kernel/generic/src/syscall/syscall.c
100,7 → 100,7
if (id < SYSCALL_END)
rc = syscall_table[id](a1, a2, a3, a4);
else {
klog_printf("TASK %lld: Unknown syscall id %d",TASK->taskid,id);
klog_printf("TASK %llu: Unknown syscall id %d",TASK->taskid,id);
task_kill(TASK->taskid);
thread_exit();
}
118,6 → 118,7
/* Thread and task related syscalls. */
(syshandler_t) sys_thread_create,
(syshandler_t) sys_thread_exit,
(syshandler_t) sys_thread_get_id,
(syshandler_t) sys_task_get_id,
/* Synchronization related syscalls. */
/branches/fs/kernel/generic/src/ipc/ipcrsc.c
34,8 → 34,8
 
/* IPC resources management
*
* The goal of this source code is to properly manage IPC resources
* and allow straight and clean clean-up procedure upon task termination.
* The goal of this source code is to properly manage IPC resources and allow
* a straightforward and clean clean-up procedure upon task termination.
*
* The pattern of usage of the resources is:
* - allocate empty phone slot, connect | deallocate slot
47,24 → 47,24
*
* Locking strategy
*
* - To use a phone, disconnect a phone etc., the phone must be
* first locked and then checked that it is connected
* - To connect an allocated phone it need not be locked (assigning
* pointer is atomic on all platforms)
* - To use a phone, disconnect a phone etc., the phone must first be locked
* and then checked to be connected
* - To connect an allocated phone it need not be locked (assigning pointer is
* atomic on all platforms)
*
* - To find an empty phone slot, the TASK must be locked
* - To answer a message, the answerbox must be locked
* - The locking of phone and answerbox is done at the ipc_ level.
* It is perfectly correct to pass unconnected phone to these functions
* and proper reply will be generated.
* It is perfectly correct to pass unconnected phone to these functions and
* proper reply will be generated.
*
* Locking order
*
* - first phone, then answerbox
* + Easy locking on calls
* - Very hard traversing list of phones when disconnecting because
* the phones may disconnect during traversal of list of connected phones.
* The only possibility is try_lock with restart of list traversal.
* - Traversing the list of phones when disconnecting is very hard because the
* phones may disconnect during the traversal of the list of connected phones.
* The only possibility is try_lock with restart of the list traversal.
*
* Destroying is less frequent, this approach is taken.
*
71,24 → 71,23
* Phone call
*
* *** Connect_me_to ***
* The caller sends IPC_M_CONNECT_ME_TO to an answerbox. The server
* receives 'phoneid' of the connecting phone as an ARG3. If it answers
* with RETVAL=0, the phonecall is accepted, otherwise it is refused.
* The caller sends IPC_M_CONNECT_ME_TO to an answerbox. The server receives
* 'phoneid' of the connecting phone as an ARG3. If it answers with RETVAL=0,
* the phonecall is accepted, otherwise it is refused.
*
* *** Connect_to_me ***
* The caller sends IPC_M_CONNECT_TO_ME, with special
* The server receives an automatically
* opened phoneid. If it accepts (RETVAL=0), it can use the phoneid
* immediately.
* Possible race condition can arise, when the client receives messages
* from new connection before getting response for connect_to_me message.
* Userspace should implement handshake protocol that would control it.
* The caller sends IPC_M_CONNECT_TO_ME.
* The server receives an automatically opened phoneid. If it accepts
* (RETVAL=0), it can use the phoneid immediately.
* A race condition can arise when the client receives messages from the new
* connection before getting the response to the connect_to_me message.
* Userspace should implement a handshake protocol to control this.
*
* Phone hangup
*
* *** The caller hangs up (sys_ipc_hangup) ***
* - The phone is disconnected (no more messages can be sent over this phone),
* all in-progress messages are correctly handled. The anwerbox receives
* all in-progress messages are correctly handled. The answerbox receives
* IPC_M_PHONE_HUNGUP call from the phone that hung up. When all async
* calls are answered, the phone is deallocated.
*
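A distilled sketch of the try_lock-with-restart traversal described above, modeled on the hangup loop in ipc_cleanup() later in this changeset (illustrative only; the declarations of box and phone are assumed):
	restart:
		spinlock_lock(&box->lock);
		while (!list_empty(&box->connected_phones)) {
			phone = list_get_instance(box->connected_phones.next,
			    phone_t, link);
			if (!spinlock_trylock(&phone->lock)) {
				/*
				 * Locking order is phone, then answerbox;
				 * back off and restart the walk.
				 */
				spinlock_unlock(&box->lock);
				goto restart;
			}
			/* ... disconnect the phone ... */
			spinlock_unlock(&phone->lock);
		}
		spinlock_unlock(&box->lock);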
/branches/fs/kernel/generic/src/ipc/sysipc.c
49,12 → 49,12
#include <mm/as.h>
#include <print.h>
 
#define GET_CHECK_PHONE(phone,phoneid,err) { \
#define GET_CHECK_PHONE(phone, phoneid, err) { \
if (phoneid > IPC_MAX_PHONES) { err; } \
phone = &TASK->phones[phoneid]; \
}
 
#define STRUCT_TO_USPACE(dst,src) copy_to_uspace(dst,src,sizeof(*(src)))
#define STRUCT_TO_USPACE(dst, src) copy_to_uspace(dst, src, sizeof(*(src)))
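Both macros are used throughout this file: a syscall handler typically validates its phone argument first and copies reply data out last. A sketch of the usual sequence, assuming the surrounding handler context:
	phone_t *phone;

	/* Bail out with a fatal IPC return code on a bad phone id. */
	GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL);
	/* ... perform the call, then copy the reply structure out ... */
	STRUCT_TO_USPACE(&reply->args, &call.data.args);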
 
/** Return true if the method is a system method */
static inline int is_system_method(unative_t method)
71,8 → 71,8
*/
static inline int is_forwardable(unative_t method)
{
if (method == IPC_M_PHONE_HUNGUP || method == IPC_M_AS_AREA_SEND \
|| method == IPC_M_AS_AREA_RECV)
if (method == IPC_M_PHONE_HUNGUP || method == IPC_M_AS_AREA_SEND ||
method == IPC_M_AS_AREA_RECV)
return 0; /* This message is meant only for the receiver */
return 1;
}
130,18 → 130,20
phone_dealloc(phoneid);
} else {
/* The connection was accepted */
phone_connect(phoneid,&answer->sender->answerbox);
phone_connect(phoneid, &answer->sender->answerbox);
/* Set 'phone identification' as arg3 of response */
IPC_SET_ARG3(answer->data, (unative_t)&TASK->phones[phoneid]);
IPC_SET_ARG3(answer->data,
(unative_t) &TASK->phones[phoneid]);
}
} else if (IPC_GET_METHOD(*olddata) == IPC_M_CONNECT_ME_TO) {
/* If the users accepted call, connect */
if (!IPC_GET_RETVAL(answer->data)) {
ipc_phone_connect((phone_t *)IPC_GET_ARG3(*olddata),
&TASK->answerbox);
ipc_phone_connect((phone_t *) IPC_GET_ARG3(*olddata),
&TASK->answerbox);
}
} else if (IPC_GET_METHOD(*olddata) == IPC_M_AS_AREA_SEND) {
if (!IPC_GET_RETVAL(answer->data)) { /* Accepted, handle as_area receipt */
if (!IPC_GET_RETVAL(answer->data)) {
/* Accepted, handle as_area receipt */
ipl_t ipl;
int rc;
as_t *as;
152,8 → 154,9
spinlock_unlock(&answer->sender->lock);
interrupts_restore(ipl);
rc = as_area_share(as, IPC_GET_ARG1(*olddata), IPC_GET_ARG2(*olddata),
AS, IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata));
rc = as_area_share(as, IPC_GET_ARG1(*olddata),
IPC_GET_ARG2(*olddata), AS,
IPC_GET_ARG1(answer->data), IPC_GET_ARG3(*olddata));
IPC_SET_RETVAL(answer->data, rc);
return rc;
}
169,8 → 172,9
spinlock_unlock(&answer->sender->lock);
interrupts_restore(ipl);
rc = as_area_share(AS, IPC_GET_ARG1(answer->data), IPC_GET_ARG2(*olddata),
as, IPC_GET_ARG1(*olddata), IPC_GET_ARG2(answer->data));
rc = as_area_share(AS, IPC_GET_ARG1(answer->data),
IPC_GET_ARG2(*olddata), as, IPC_GET_ARG1(*olddata),
IPC_GET_ARG2(answer->data));
IPC_SET_RETVAL(answer->data, rc);
}
}
192,7 → 196,7
if (newphid < 0)
return ELIMIT;
/* Set arg3 for server */
IPC_SET_ARG3(call->data, (unative_t)&TASK->phones[newphid]);
IPC_SET_ARG3(call->data, (unative_t) &TASK->phones[newphid]);
call->flags |= IPC_CALL_CONN_ME_TO;
call->priv = newphid;
break;
217,8 → 221,8
/** Do basic kernel processing of received call answer */
static void process_answer(call_t *call)
{
if (IPC_GET_RETVAL(call->data) == EHANGUP && \
call->flags & IPC_CALL_FORWARDED)
if (IPC_GET_RETVAL(call->data) == EHANGUP &&
(call->flags & IPC_CALL_FORWARDED))
IPC_SET_RETVAL(call->data, EFORWARD);
 
if (call->flags & IPC_CALL_CONN_ME_TO) {
233,7 → 237,7
*
* @return 0 - the call should be passed to userspace, 1 - ignore call
*/
static int process_request(answerbox_t *box,call_t *call)
static int process_request(answerbox_t *box, call_t *call)
{
int phoneid;
 
254,8 → 258,8
* @return Call identification, returns -1 on fatal error,
-2 on 'Too many async requests, handle answers first'
*/
unative_t sys_ipc_call_sync_fast(unative_t phoneid, unative_t method,
unative_t arg1, ipc_data_t *data)
unative_t sys_ipc_call_sync_fast(unative_t phoneid, unative_t method,
unative_t arg1, ipc_data_t *data)
{
call_t call;
phone_t *phone;
278,8 → 282,8
}
 
/** Synchronous IPC call allowing to send whole message */
unative_t sys_ipc_call_sync(unative_t phoneid, ipc_data_t *question,
ipc_data_t *reply)
unative_t sys_ipc_call_sync(unative_t phoneid, ipc_data_t *question,
ipc_data_t *reply)
{
call_t call;
phone_t *phone;
287,7 → 291,8
int rc;
 
ipc_call_static_init(&call);
rc = copy_from_uspace(&call.data.args, &question->args, sizeof(call.data.args));
rc = copy_from_uspace(&call.data.args, &question->args,
sizeof(call.data.args));
if (rc != 0)
return (unative_t) rc;
 
324,8 → 329,8
* @return Call identification, returns -1 on fatal error,
-2 on 'Too many async requests, handle answers first'
*/
unative_t sys_ipc_call_async_fast(unative_t phoneid, unative_t method,
unative_t arg1, unative_t arg2)
unative_t sys_ipc_call_async_fast(unative_t phoneid, unative_t method,
unative_t arg1, unative_t arg2)
{
call_t *call;
phone_t *phone;
342,7 → 347,7
IPC_SET_ARG2(call->data, arg2);
IPC_SET_ARG3(call->data, 0);
 
if (!(res=request_preprocess(call)))
if (!(res = request_preprocess(call)))
ipc_call(phone, call);
else
ipc_backsend_err(phone, call, res);
367,12 → 372,13
GET_CHECK_PHONE(phone, phoneid, return IPC_CALLRET_FATAL);
 
call = ipc_call_alloc(0);
rc = copy_from_uspace(&call->data.args, &data->args, sizeof(call->data.args));
rc = copy_from_uspace(&call->data.args, &data->args,
sizeof(call->data.args));
if (rc != 0) {
ipc_call_free(call);
return (unative_t) rc;
}
if (!(res=request_preprocess(call)))
if (!(res = request_preprocess(call)))
ipc_call(phone, call);
else
ipc_backsend_err(phone, call, res);
388,7 → 394,7
* arg3 is not rewritten for certain system IPC
*/
unative_t sys_ipc_forward_fast(unative_t callid, unative_t phoneid,
unative_t method, unative_t arg1)
unative_t method, unative_t arg1)
{
call_t *call;
phone_t *phone;
429,8 → 435,8
}
 
/** Send IPC answer */
unative_t sys_ipc_answer_fast(unative_t callid, unative_t retval,
unative_t arg1, unative_t arg2)
unative_t sys_ipc_answer_fast(unative_t callid, unative_t retval,
unative_t arg1, unative_t arg2)
{
call_t *call;
ipc_data_t saved_data;
480,7 → 486,7
saveddata = 1;
}
rc = copy_from_uspace(&call->data.args, &data->args,
sizeof(call->data.args));
sizeof(call->data.args));
if (rc != 0)
return rc;
 
508,9 → 514,10
 
/** Wait for incoming ipc call or answer
*
* @param calldata Pointer to buffer where the call/answer data is stored
* @param usec Timeout. See waitq_sleep_timeout() for explanation.
* @param flags Select mode of sleep operation. See waitq_sleep_timeout() for explanation.
* @param calldata Pointer to buffer where the call/answer data is stored.
* @param usec Timeout. See waitq_sleep_timeout() for explanation.
* @param flags Select mode of sleep operation. See waitq_sleep_timeout()
* for explanation.
*
* @return Callid; if callid & 1, then the call is an answer
*/
519,7 → 526,8
call_t *call;
 
restart:
call = ipc_wait_for_call(&TASK->answerbox, usec, flags | SYNCH_FLAGS_INTERRUPTIBLE);
call = ipc_wait_for_call(&TASK->answerbox, usec,
flags | SYNCH_FLAGS_INTERRUPTIBLE);
if (!call)
return 0;
 
574,7 → 582,8
*
* @return EPERM or a return code returned by ipc_irq_register().
*/
unative_t sys_ipc_register_irq(inr_t inr, devno_t devno, unative_t method, irq_code_t *ucode)
unative_t sys_ipc_register_irq(inr_t inr, devno_t devno, unative_t method,
irq_code_t *ucode)
{
if (!(cap_get(TASK) & CAP_IRQ_REG))
return EPERM;
/branches/fs/kernel/generic/src/ipc/ipc.c
163,7 → 163,7
spinlock_lock(&callerbox->lock);
list_append(&call->link, &callerbox->answers);
spinlock_unlock(&callerbox->lock);
waitq_wakeup(&callerbox->wq, 0);
waitq_wakeup(&callerbox->wq, WAKEUP_FIRST);
}
 
/** Answer message, that is in callee queue
205,7 → 205,7
spinlock_lock(&box->lock);
list_append(&call->link, &box->calls);
spinlock_unlock(&box->lock);
waitq_wakeup(&box->wq, 0);
waitq_wakeup(&box->wq, WAKEUP_FIRST);
}
 
/** Send a asynchronous request using phone to answerbox
374,6 → 374,7
int i;
call_t *call;
phone_t *phone;
DEADLOCK_PROBE_INIT(p_phonelck);
 
/* Disconnect all our phones ('ipc_phone_hangup') */
for (i=0;i < IPC_MAX_PHONES; i++)
387,9 → 388,10
spinlock_lock(&TASK->answerbox.lock);
while (!list_empty(&TASK->answerbox.connected_phones)) {
phone = list_get_instance(TASK->answerbox.connected_phones.next,
phone_t, link);
phone_t, link);
if (! spinlock_trylock(&phone->lock)) {
spinlock_unlock(&TASK->answerbox.lock);
DEADLOCK_PROBE(p_phonelck, DEADLOCK_THRESHOLD);
goto restart_phones;
}
500,7 → 502,7
printf("ABOX - CALLS:\n");
for (tmp=task->answerbox.calls.next; tmp != &task->answerbox.calls;tmp = tmp->next) {
call = list_get_instance(tmp, call_t, link);
printf("Callid: %p Srctask:%lld M:%d A1:%d A2:%d A3:%d Flags:%x\n",call,
printf("Callid: %p Srctask:%llu M:%d A1:%d A2:%d A3:%d Flags:%x\n",call,
call->sender->taskid, IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data),
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), call->flags);
}
510,7 → 512,7
tmp != &task->answerbox.dispatched_calls;
tmp = tmp->next) {
call = list_get_instance(tmp, call_t, link);
printf("Callid: %p Srctask:%lld M:%d A1:%d A2:%d A3:%d Flags:%x\n",call,
printf("Callid: %p Srctask:%llu M:%d A1:%d A2:%d A3:%d Flags:%x\n",call,
call->sender->taskid, IPC_GET_METHOD(call->data), IPC_GET_ARG1(call->data),
IPC_GET_ARG2(call->data), IPC_GET_ARG3(call->data), call->flags);
}
/branches/fs/kernel/generic/src/ipc/irq.c
336,6 → 336,7
while (box->irq_head.next != &box->irq_head) {
link_t *cur = box->irq_head.next;
irq_t *irq;
DEADLOCK_PROBE_INIT(p_irqlock);
irq = list_get_instance(cur, irq_t, notif_cfg.link);
if (!spinlock_trylock(&irq->lock)) {
344,6 → 345,7
*/
spinlock_unlock(&box->irq_lock);
interrupts_restore(ipl);
DEADLOCK_PROBE(p_irqlock, DEADLOCK_THRESHOLD);
goto loop;
}
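DEADLOCK_PROBE_INIT() and DEADLOCK_PROBE(), introduced at several trylock retry loops by this revision, are lightweight diagnostics. A plausible minimal definition, given here purely as an assumption about their shape rather than the actual HelenOS macros, counts failed attempts and reports once a threshold is crossed:
	/* Hypothetical sketch; count_t is the counter type used elsewhere. */
	#define DEADLOCK_PROBE_INIT(pname)	count_t pname = 0
	#define DEADLOCK_PROBE(pname, value) \
		if ((pname)++ > (value)) { \
			(pname) = 0; \
			printf("Deadlock probe %s: exceeded threshold %d\n", \
			    #pname, (value)); \
		}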
/branches/fs/kernel/Makefile
95,10 → 95,6
DEFS += -DCONFIG_VIRT_IDX_DCACHE
endif
 
ifeq ($(CONFIG_POWEROFF),y)
DEFS += -DCONFIG_POWEROFF
endif
 
ifeq ($(CONFIG_FB),y)
ifeq ($(ARCH),ia32)
DEFS += -DCONFIG_VESA_WIDTH=$(CONFIG_VESA_WIDTH)
160,6 → 156,7
generic/src/main/kinit.c \
generic/src/main/uinit.c \
generic/src/main/version.c \
generic/src/main/shutdown.c \
generic/src/proc/scheduler.c \
generic/src/proc/thread.c \
generic/src/proc/task.c \
/branches/fs/kernel/arch/sparc64/include/types.h
62,6 → 62,7
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/sparc64/include/stack.h
43,7 → 43,7
/**
* 16-extended-word save area for %i[0-7] and %l[0-7] registers.
*/
#define STACK_WINDOW_SAVE_AREA_SIZE (16*STACK_ITEM_SIZE)
#define STACK_WINDOW_SAVE_AREA_SIZE (16 * STACK_ITEM_SIZE)
 
/**
* By convention, the actual top of the stack is %sp + STACK_BIAS.
/branches/fs/kernel/arch/sparc64/include/trap/mmu.h
129,7 → 129,21
wrpr %g0, 1, %tl
.endif
 
/*
* Switch from the MM globals.
*/
wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
 
/*
* Read the Tag Access register for the higher-level handler.
* This is necessary to survive nested DTLB misses.
*/
mov VA_DMMU_TAG_ACCESS, %g2
ldxa [%g2] ASI_DMMU, %g2
 
/*
* g2 will be passed as an argument to fast_data_access_mmu_miss().
*/
PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
.endm
 
142,7 → 156,21
wrpr %g0, 1, %tl
.endif
 
/*
* Switch from the MM globals.
*/
wrpr %g0, PSTATE_PRIV_BIT | PSTATE_AG_BIT, %pstate
 
/*
* Read the Tag Access register for the higher-level handler.
* This is necessary to survive nested DTLB misses.
*/
mov VA_DMMU_TAG_ACCESS, %g2
ldxa [%g2] ASI_DMMU, %g2
 
/*
* g2 will be passed as an argument to fast_data_access_protection().
*/
PREEMPTIBLE_HANDLER fast_data_access_protection
.endm
 
/branches/fs/kernel/arch/sparc64/include/mm/frame.h
35,7 → 35,20
#ifndef KERN_sparc64_FRAME_H_
#define KERN_sparc64_FRAME_H_
 
#define FRAME_WIDTH 13 /* 8K */
/*
* Page size supported by the MMU.
* For 8K there is the nasty illegal virtual aliasing problem.
* Therefore, the kernel uses 8K only internally on the TLB and TSB levels.
*/
#define MMU_FRAME_WIDTH 13 /* 8K */
#define MMU_FRAME_SIZE (1 << MMU_FRAME_WIDTH)
 
/*
* Page size exported to the generic memory management subsystems.
* This page size is not directly supported by the MMU, but we can emulate
* each 16K page with a pair of adjacent 8K pages.
*/
#define FRAME_WIDTH 14 /* 16K */
#define FRAME_SIZE (1 << FRAME_WIDTH)
 
#ifdef KERNEL
/branches/fs/kernel/arch/sparc64/include/mm/page.h
37,11 → 37,27
 
#include <arch/mm/frame.h>
 
/*
* On the TLB and TSB level, we still use 8K pages, which are supported by the
* MMU.
*/
#define MMU_PAGE_WIDTH MMU_FRAME_WIDTH
#define MMU_PAGE_SIZE MMU_FRAME_SIZE
 
/*
* On the page table level, we use 16K pages. 16K pages are not supported by
* the MMU but we emulate them with pairs of 8K pages.
*/
#define PAGE_WIDTH FRAME_WIDTH
#define PAGE_SIZE FRAME_SIZE
 
#define PAGE_COLOR_BITS 1 /**< 14 - 13; 2^14 == 16K == alias boundary. */
#define MMU_PAGES_PER_PAGE (1 << (PAGE_WIDTH - MMU_PAGE_WIDTH))
 
/*
* With 16K pages, there is only one page color.
*/
#define PAGE_COLOR_BITS 0 /**< 14 - 14; 2^14 == 16K == alias boundary. */
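A worked example of the emulation arithmetic, using the constants above (PAGE_WIDTH is 14, MMU_PAGE_WIDTH is 13, so MMU_PAGES_PER_PAGE is 2); this mirrors the index computation performed by the sparc64 TLB miss handlers later in this changeset:
	/* Example virtual address inside the second 8K half of a page. */
	uintptr_t va = 0x40006000;
	/* 16K page the address belongs to: 0x40004000. */
	uintptr_t page = ALIGN_DOWN(va, PAGE_SIZE);
	/* 8K subpage index within the 16K page: 1. */
	index_t index = (va >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
	/*
	 * The PTE for 'page' is then installed as two adjacent 8K TLB
	 * entries: page + i * MMU_PAGE_SIZE maps to frame + i * MMU_PAGE_SIZE
	 * for i in {0, 1}.
	 */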
 
#ifdef KERNEL
 
#ifndef __ASM__
/branches/fs/kernel/arch/sparc64/include/mm/tlb.h
428,9 → 428,9
membar();
}
 
extern void fast_instruction_access_mmu_miss(int n, istate_t *istate);
extern void fast_data_access_mmu_miss(int n, istate_t *istate);
extern void fast_data_access_protection(int n, istate_t *istate);
extern void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate);
extern void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate);
extern void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate);
 
extern void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable);
 
/branches/fs/kernel/arch/sparc64/include/mm/as.h
81,10 → 81,11
#include <genarch/mm/as_ht.h>
 
#ifdef CONFIG_TSB
# include <arch/mm/tsb.h>
# define as_invalidate_translation_cache(as, page, cnt) tsb_invalidate(as, page, cnt)
#include <arch/mm/tsb.h>
#define as_invalidate_translation_cache(as, page, cnt) \
tsb_invalidate((as), (page), (cnt))
#else
# define as_invalidate_translation_cache(as, page, cnt)
#define as_invalidate_translation_cache(as, page, cnt)
#endif
 
extern void as_arch_init(void);
/branches/fs/kernel/arch/sparc64/include/mm/tsb.h
112,8 → 112,8
struct pte;
 
extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);
extern void itsb_pte_copy(struct pte *t);
extern void dtsb_pte_copy(struct pte *t, bool ro);
extern void itsb_pte_copy(struct pte *t, index_t index);
extern void dtsb_pte_copy(struct pte *t, index_t index, bool ro);
 
#endif /* !def __ASM__ */
 
/branches/fs/kernel/arch/sparc64/include/barrier.h
39,12 → 39,12
* Our critical section barriers are prepared for the weakest RMO memory model.
*/
#define CS_ENTER_BARRIER() \
asm volatile ( \
asm volatile ( \
"membar #LoadLoad | #LoadStore\n" \
::: "memory" \
)
#define CS_LEAVE_BARRIER() \
asm volatile ( \
asm volatile ( \
"membar #StoreStore\n" \
"membar #LoadStore\n" \
::: "memory" \
/branches/fs/kernel/arch/sparc64/include/cpu.h
39,6 → 39,10
#include <arch/register.h>
#include <arch/asm.h>
 
#ifdef CONFIG_SMP
#include <arch/mm/cache.h>
#endif
 
#define MANUF_FUJITSU 0x04
#define MANUF_ULTRASPARC 0x17 /**< UltraSPARC I, UltraSPARC II */
#define MANUF_SUN 0x3e
53,12 → 57,13
#define IMPL_SPARC64V 0x5
 
typedef struct {
uint32_t mid; /**< Processor ID as read from UPA_CONFIG. */
uint32_t mid; /**< Processor ID as read from
UPA_CONFIG. */
ver_reg_t ver;
uint32_t clock_frequency; /**< Processor frequency in Hz. */
uint64_t next_tick_cmpr; /**< Next clock interrupt should be
generated when the TICK register
matches this value. */
generated when the TICK register
matches this value. */
} cpu_arch_t;
#endif
/branches/fs/kernel/arch/sparc64/Makefile.inc
83,8 → 83,7
arch/$(ARCH)/src/fpu_context.c \
arch/$(ARCH)/src/dummy.s \
arch/$(ARCH)/src/mm/as.c \
arch/$(ARCH)/src/mm/cache.c \
arch/$(ARCH)/src/mm/cache_asm.S \
arch/$(ARCH)/src/mm/cache.S \
arch/$(ARCH)/src/mm/frame.c \
arch/$(ARCH)/src/mm/page.c \
arch/$(ARCH)/src/mm/tlb.c \
/branches/fs/kernel/arch/sparc64/src/smp/smp.c
100,7 → 100,7
if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT)
printf("%s: waiting for processor (mid = %d) timed out\n",
__FUNCTION__, mid);
__FUNCTION__, mid);
}
}
 
/branches/fs/kernel/arch/sparc64/src/smp/ipi.c
74,13 → 74,13
panic("Interrupt Dispatch Status busy bit set\n");
do {
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_0, (uintptr_t)
func);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_0,
(uintptr_t) func);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_1, 0);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_2, 0);
asi_u64_write(ASI_UDB_INTR_W, (mid <<
INTR_VEC_DISPATCH_MID_SHIFT) | ASI_UDB_INTR_W_DISPATCH,
0);
asi_u64_write(ASI_UDB_INTR_W,
(mid << INTR_VEC_DISPATCH_MID_SHIFT) |
ASI_UDB_INTR_W_DISPATCH, 0);
membar();
/branches/fs/kernel/arch/sparc64/src/asm.S
273,7 → 273,7
flushw
wrpr %g0, 0, %cleanwin ! avoid information leak
 
mov %i3, %o0 ! uarg
mov %i2, %o0 ! uarg
 
clr %i2
clr %i3
/branches/fs/kernel/arch/sparc64/src/proc/scheduler.c
62,9 → 62,8
* - preemptible trap handler switches to alternate globals
* before it explicitly uses %g7.
*/
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE
- (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE,
STACK_ALIGNMENT));
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE -
(STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
write_to_ig_g6(sp);
write_to_ag_g6(sp);
write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
/branches/fs/kernel/arch/sparc64/src/proc/thread.c
55,7 → 55,7
* belonging to a killed thread.
*/
frame_free(KA2PA(ALIGN_DOWN((uintptr_t)
t->arch.uspace_window_buffer, PAGE_SIZE)));
t->arch.uspace_window_buffer, PAGE_SIZE)));
}
}
 
75,8 → 75,8
* Mind the possible alignment of the userspace window buffer
* belonging to a killed thread.
*/
t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf,
PAGE_SIZE);
t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf,
PAGE_SIZE);
}
}
 
/branches/fs/kernel/arch/sparc64/src/sparc64.c
155,5 → 155,11
/* not reached */
}
 
void arch_reboot(void)
{
// TODO
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/sparc64/src/trap/interrupt.c
83,9 → 83,9
} else if (data0 > config.base) {
/*
* This is a cross-call.
* data0 contains address of kernel function.
* data0 contains address of the kernel function.
* We call the function only after we verify
* it is on of the supported ones.
* it is one of the supported ones.
*/
#ifdef CONFIG_SMP
if (data0 == (uintptr_t) tlb_shootdown_ipi_recv) {
/branches/fs/kernel/arch/sparc64/src/cpu/cpu.c
63,10 → 63,10
mid = *((uint32_t *) prop->value);
if (mid == CPU->arch.mid) {
prop = ofw_tree_getprop(node,
"clock-frequency");
"clock-frequency");
if (prop && prop->value)
clock_frequency = *((uint32_t *)
prop->value);
prop->value);
}
}
node = ofw_tree_find_peer_by_device_type(node, "cpu");
/branches/fs/kernel/arch/sparc64/src/mm/cache_asm.S
File deleted
/branches/fs/kernel/arch/sparc64/src/mm/cache.c
File deleted
/branches/fs/kernel/arch/sparc64/src/mm/tlb.c
54,14 → 54,14
#include <arch/mm/tsb.h>
#endif
 
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const
char *str);
static void dtlb_pte_copy(pte_t *t, index_t index, bool ro);
static void itlb_pte_copy(pte_t *t, index_t index);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str);
tlb_tag_access_reg_t tag, const char *str);
 
char *context_encoding[] = {
"Primary",
92,8 → 92,8
* @param locked True for permanent mappings, false otherwise.
* @param cacheable True if the mapping is cacheable, false otherwise.
*/
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool
locked, bool cacheable)
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize,
bool locked, bool cacheable)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
126,11 → 126,12
 
/** Copy PTE to TLB.
*
* @param t Page Table Entry to be copied.
* @param ro If true, the entry will be created read-only, regardless of its w
* field.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the entry will be created read-only, regardless of its
* w field.
*/
void dtlb_pte_copy(pte_t *t, bool ro)
void dtlb_pte_copy(pte_t *t, index_t index, bool ro)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
137,15 → 138,15
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
pg.address = t->page + (index << MMU_PAGE_WIDTH);
fr.address = t->frame + (index << MMU_PAGE_WIDTH);
 
tag.value = 0;
tag.context = t->as->asid;
tag.vpn = pg.vpn;
 
dtlb_tag_access_write(tag.value);
 
data.value = 0;
data.v = true;
data.size = PAGESIZE_8K;
158,15 → 159,16
data.p = t->k; /* p like privileged */
data.w = ro ? false : t->w;
data.g = t->g;
 
dtlb_data_in_write(data.value);
}
 
/** Copy PTE to ITLB.
*
* @param t Page Table Entry to be copied.
* @param t Page Table Entry to be copied.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itlb_pte_copy(pte_t *t)
void itlb_pte_copy(pte_t *t, index_t index)
{
tlb_tag_access_reg_t tag;
tlb_data_t data;
173,8 → 175,8
page_address_t pg;
frame_address_t fr;
 
pg.address = t->page;
fr.address = t->frame;
pg.address = t->page + (index << MMU_PAGE_WIDTH);
fr.address = t->frame + (index << MMU_PAGE_WIDTH);
 
tag.value = 0;
tag.context = t->as->asid;
196,9 → 198,10
}
 
/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
pte_t *t;
 
page_table_lock(AS, true);
209,9 → 212,9
* Insert it into ITLB.
*/
t->a = true;
itlb_pte_copy(t);
itlb_pte_copy(t, index);
#ifdef CONFIG_TSB
itsb_pte_copy(t);
itsb_pte_copy(t, index);
#endif
page_table_unlock(AS, true);
} else {
222,7 → 225,7
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_EXEC, istate) == AS_PF_FAULT) {
do_fast_instruction_access_mmu_miss_fault(istate,
__FUNCTION__);
__FUNCTION__);
}
}
}
231,24 → 234,29
*
* Note that some faults (e.g. kernel faults) were already resolved by the
* low-level, assembly language part of the fast_data_access_mmu_miss handler.
*
* @param tag Content of the TLB Tag Access register as it existed when the
* trap happened. This is to prevent confusion created by clobbered
* Tag Access register during a nested DTLB miss.
* @param istate Interrupted state saved on the stack.
*/
void fast_data_access_mmu_miss(int n, istate_t *istate)
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
va = tag.vpn << PAGE_WIDTH;
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE;
 
if (tag.context == ASID_KERNEL) {
if (!tag.vpn) {
/* NULL access in kernel */
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected "
"kernel page fault.");
"kernel page fault.");
}
 
page_table_lock(AS, true);
259,32 → 267,39
* Insert it into DTLB.
*/
t->a = true;
dtlb_pte_copy(t, true);
dtlb_pte_copy(t, index, true);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, true);
dtsb_pte_copy(t, index, true);
#endif
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to the address space page fault handler.
* Forward the page fault to the address space page fault
* handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
do_fast_data_access_mmu_miss_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
}
}
 
/** DTLB protection fault handler. */
void fast_data_access_protection(int n, istate_t *istate)
/** DTLB protection fault handler.
*
* @param tag Content of the TLB Tag Access register as it existed when the
* trap happened. This is to prevent confusion created by clobbered
* Tag Access register during a nested DTLB miss.
* @param istate Interrupted state saved on the stack.
*/
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
va = tag.vpn << PAGE_WIDTH;
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
 
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
296,10 → 311,11
*/
t->a = true;
t->d = true;
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, va);
dtlb_pte_copy(t, false);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY,
va + index * MMU_PAGE_SIZE);
dtlb_pte_copy(t, index, false);
#ifdef CONFIG_TSB
dtsb_pte_copy(t, false);
dtsb_pte_copy(t, index, false);
#endif
page_table_unlock(AS, true);
} else {
310,7 → 326,7
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
do_fast_data_access_protection_fault(istate, tag,
__FUNCTION__);
__FUNCTION__);
}
}
}
328,10 → 344,10
t.value = itlb_tag_read_read(i);
 
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
printf("D-TLB contents:\n");
340,16 → 356,16
t.value = dtlb_tag_read_read(i);
printf("%d: vpn=%#llx, context=%d, v=%d, size=%d, nfo=%d, "
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
"ie=%d, soft2=%#x, diag=%#x, pfn=%#x, soft=%#x, l=%d, "
"cp=%d, cv=%d, e=%d, p=%d, w=%d, g=%d\n", i, t.vpn,
t.context, d.v, d.size, d.nfo, d.ie, d.soft2, d.diag,
d.pfn, d.soft, d.l, d.cp, d.cv, d.e, d.p, d.w, d.g);
}
 
}
 
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char
*str)
void do_fast_instruction_access_mmu_miss_fault(istate_t *istate,
const char *str)
{
fault_if_from_uspace(istate, "%s\n", str);
dump_istate(istate);
356,29 → 372,32
panic("%s\n", str);
}
 
void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
void do_fast_data_access_mmu_miss_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
 
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
va = tag.vpn << MMU_PAGE_WIDTH;
if (tag.context) {
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
}
dump_istate(istate);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
panic("%s\n", str);
}
 
void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t
tag, const char *str)
void do_fast_data_access_protection_fault(istate_t *istate,
tlb_tag_access_reg_t tag, const char *str)
{
uintptr_t va;
 
va = tag.vpn << PAGE_WIDTH;
va = tag.vpn << MMU_PAGE_WIDTH;
 
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
if (tag.context) {
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
}
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
dump_istate(istate);
panic("%s\n", str);
393,8 → 412,8
sfar = dtlb_sfar_read();
printf("DTLB SFSR: asi=%#x, ft=%#x, e=%d, ct=%d, pr=%d, w=%d, ow=%d, "
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
"fv=%d\n", sfsr.asi, sfsr.ft, sfsr.e, sfsr.ct, sfsr.pr, sfsr.w,
sfsr.ow, sfsr.fv);
printf("DTLB SFAR: address=%p\n", sfar);
dtlb_sfsr_write(0);
481,11 → 500,11
ctx.context = asid;
mmu_primary_context_write(ctx.v);
for (i = 0; i < cnt; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i *
PAGE_SIZE);
for (i = 0; i < cnt * MMU_PAGES_PER_PAGE; i++) {
itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * MMU_PAGE_SIZE);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY,
page + i * MMU_PAGE_SIZE);
}
mmu_primary_context_write(pc_save.v);
/branches/fs/kernel/arch/sparc64/src/mm/as.c
42,7 → 42,6
#ifdef CONFIG_TSB
#include <arch/mm/tsb.h>
#include <arch/memstr.h>
#include <synch/mutex.h>
#include <arch/asm.h>
#include <mm/frame.h>
#include <bitops.h>
61,8 → 60,13
int as_constructor_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
/*
* The order must be calculated with respect to the emulated
* 16K page size.
*/
int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH);
sizeof(tsb_entry_t)) >> FRAME_WIDTH);
 
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
 
if (!tsb)
70,9 → 74,10
 
as->arch.itsb = (tsb_entry_t *) tsb;
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
sizeof(tsb_entry_t));
memsetb((uintptr_t) as->arch.itsb, (ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT)
* sizeof(tsb_entry_t), 0);
sizeof(tsb_entry_t));
 
memsetb((uintptr_t) as->arch.itsb,
(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
return 0;
}
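For orientation, the order computation above can be traced with concrete numbers. The sketch below is illustrative only; the constants (512-entry ITSB and DTSB, a 16-byte tsb_entry_t, FRAME_WIDTH of 14 for the emulated 16K frame) are assumptions for the example, not values quoted from the headers.

#include <stdio.h>

/* floor(log2(v)) for v > 0, mirroring what fnzb32() returns */
static int fnzb32_sketch(unsigned int v)
{
    int i = -1;
    while (v) {
        v >>= 1;
        i++;
    }
    return i;
}

int main(void)
{
    unsigned int itsb_entries = 512;    /* assumed */
    unsigned int dtsb_entries = 512;    /* assumed */
    unsigned int entry_size = 16;       /* assumed sizeof(tsb_entry_t) */
    unsigned int frame_width = 14;      /* emulated 16K frame */

    int order = fnzb32_sketch(((itsb_entries + dtsb_entries) * entry_size)
        >> frame_width);

    /* (512 + 512) * 16 = 16384 = 1 << 14, so both TSBs fit into a
       single 16K frame and the order comes out as 0 */
    printf("order = %d\n", order);
    return 0;
}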
80,8 → 85,12
int as_destructor_arch(as_t *as)
{
#ifdef CONFIG_TSB
/*
* The count must be calculated with respect to the emulated 16K page
* size.
*/
count_t cnt = ((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH;
sizeof(tsb_entry_t)) >> FRAME_WIDTH;
frame_free(KA2PA((uintptr_t) as->arch.itsb));
return cnt;
#else
92,13 → 101,7
int as_create_arch(as_t *as, int flags)
{
#ifdef CONFIG_TSB
ipl_t ipl;
 
ipl = interrupts_disable();
mutex_lock_active(&as->lock); /* completely unnecessary, but polite */
tsb_invalidate(as, 0, (count_t) -1);
mutex_unlock(&as->lock);
interrupts_restore(ipl);
#endif
return 0;
}
115,18 → 118,17
tlb_context_reg_t ctx;
/*
* Note that we don't lock the address space.
* That's correct - we can afford it here
* because we only read members that are
* currently read-only.
* Note that we do not, and must not, lock the address space. That is
* fine, because we only read members that are currently read-only.
*
* Moreover, the as->asid is protected by asidlock, which is being held.
*/
/*
* Write ASID to secondary context register.
* The primary context register has to be set
* from TL>0 so it will be filled from the
* secondary context register from the TL=1
* code just before switch to userspace.
* Write ASID to secondary context register. The primary context
* register has to be set from TL>0 so it will be filled from the
* secondary context register from the TL=1 code just before switch to
* userspace.
*/
ctx.v = 0;
ctx.context = as->asid;
139,7 → 141,7
 
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
158,9 → 160,9
tsb_base.size = TSB_SIZE;
tsb_base.split = 0;
 
tsb_base.base = ((uintptr_t) as->arch.itsb) >> PAGE_WIDTH;
tsb_base.base = ((uintptr_t) as->arch.itsb) >> MMU_PAGE_WIDTH;
itsb_base_write(tsb_base.value);
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> PAGE_WIDTH;
tsb_base.base = ((uintptr_t) as->arch.dtsb) >> MMU_PAGE_WIDTH;
dtsb_base_write(tsb_base.value);
#endif
}
176,10 → 178,10
{
 
/*
* Note that we don't lock the address space.
* That's correct - we can afford it here
* because we only read members that are
* currently read-only.
* Note that we do not, and must not, lock the address space. That is
* fine, because we only read members that are currently read-only.
*
* Moreover, the as->asid is protected by asidlock, which is being held.
*/
 
#ifdef CONFIG_TSB
189,7 → 191,7
 
uintptr_t tsb = (uintptr_t) as->arch.itsb;
if (!overlaps(tsb, 8 * PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
if (!overlaps(tsb, 8 * MMU_PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* TSBs were allocated from memory not covered
* by the locked 4M kernel DTLB entry. We need
/branches/fs/kernel/arch/sparc64/src/mm/cache.S
0,0 → 1,93
/*
* Copyright (c) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#include <arch/arch.h>
 
#define DCACHE_SIZE (16 * 1024)
#define DCACHE_LINE_SIZE 32
 
#define DCACHE_TAG_SHIFT 2
 
.register %g2, #scratch
.register %g3, #scratch
 
/** Flush the whole D-cache. */
.global dcache_flush
dcache_flush:
set (DCACHE_SIZE - DCACHE_LINE_SIZE), %g1
stxa %g0, [%g1] ASI_DCACHE_TAG
0: membar #Sync
subcc %g1, DCACHE_LINE_SIZE, %g1
bnz,pt %xcc, 0b
stxa %g0, [%g1] ASI_DCACHE_TAG
membar #Sync
retl
! beware SF Erratum #51, do not put the MEMBAR here
nop
 
/** Flush only D-cache lines of one virtual color.
*
* @param o0 Virtual color to be flushed.
*/
.global dcache_flush_color
dcache_flush_color:
mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1
set DCACHE_SIZE / 2, %g2
sllx %g2, %o0, %g2
sub %g2, DCACHE_LINE_SIZE, %g2
0: stxa %g0, [%g2] ASI_DCACHE_TAG
membar #Sync
subcc %g1, 1, %g1
bnz,pt %xcc, 0b
sub %g2, DCACHE_LINE_SIZE, %g2
retl
nop
 
/** Flush only D-cache lines of one virtual color and one tag.
*
* @param o0 Virtual color to lookup the tag.
* @param o1 Tag of the cachelines to be flushed.
*/
.global dcache_flush_tag
dcache_flush_tag:
mov (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2, %g1
set DCACHE_SIZE / 2, %g2
sllx %g2, %o0, %g2
sub %g2, DCACHE_LINE_SIZE, %g2
0: ldxa [%g2] ASI_DCACHE_TAG, %g3
srlx %g3, DCACHE_TAG_SHIFT, %g3
cmp %g3, %o1
bnz 1f
nop
stxa %g0, [%g2] ASI_DCACHE_TAG
membar #Sync
1: subcc %g1, 1, %g1
bnz,pt %xcc, 0b
sub %g2, DCACHE_LINE_SIZE, %g2
retl
nop
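The delay slots make the routines above denser than they are. The following C model of dcache_flush_color is illustrative only; the tag store is a printing stand-in for the privileged stxa to ASI_DCACHE_TAG, and the loop shape matches the assembly (count first, address decremented before each store).

#include <stdint.h>
#include <stdio.h>

#define DCACHE_SIZE       (16 * 1024)
#define DCACHE_LINE_SIZE  32

/* stand-in for the privileged stxa ... ASI_DCACHE_TAG store */
static void dcache_tag_write_sketch(uintptr_t addr)
{
    printf("zero tag at %#lx\n", (unsigned long) addr);
}

static void dcache_flush_color_sketch(unsigned int color)
{
    uintptr_t addr = ((uintptr_t) DCACHE_SIZE / 2) << color;
    unsigned int i;

    /* one half of the D-cache corresponds to one virtual color */
    for (i = 0; i < (DCACHE_SIZE / DCACHE_LINE_SIZE) / 2; i++) {
        addr -= DCACHE_LINE_SIZE;
        dcache_tag_write_sketch(addr);
    }
}

int main(void)
{
    dcache_flush_color_sketch(0);
    return 0;
}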
/branches/fs/kernel/arch/sparc64/src/mm/tsb.c
34,6 → 34,7
 
#include <arch/mm/tsb.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <arch/barrier.h>
#include <mm/as.h>
#include <arch/types.h>
40,7 → 41,7
#include <macros.h>
#include <debug.h>
 
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - PAGE_WIDTH)) - 1)
#define TSB_INDEX_MASK ((1 << (21 + 1 + TSB_SIZE - MMU_PAGE_WIDTH)) - 1)
 
/** Invalidate portion of TSB.
*
59,28 → 60,39
ASSERT(as->arch.itsb && as->arch.dtsb);
i0 = (page >> PAGE_WIDTH) & TSB_INDEX_MASK;
cnt = min(pages, ITSB_ENTRY_COUNT);
i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);
 
if (pages == (count_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
cnt = ITSB_ENTRY_COUNT;
else
cnt = pages * 2;
for (i = 0; i < cnt; i++) {
as->arch.itsb[(i0 + i) & (ITSB_ENTRY_COUNT - 1)].tag.invalid =
true;
true;
as->arch.dtsb[(i0 + i) & (DTSB_ENTRY_COUNT - 1)].tag.invalid =
true;
true;
}
}
 
/** Copy software PTE to ITSB.
*
* @param t Software PTE.
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
*/
void itsb_pte_copy(pte_t *t)
void itsb_pte_copy(pte_t *t, index_t index)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
 
ASSERT(index <= 1);
as = t->as;
tsb = &as->arch.itsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
ASSERT(entry < ITSB_ENTRY_COUNT);
tsb = &as->arch.itsb[entry];
 
/*
* We use write barriers to make sure that the TSB load
95,10 → 107,11
write_barrier();
 
tsb->tag.context = as->asid;
/* the shift is bigger than PAGE_WIDTH, do not bother with index */
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
tsb->data.p = t->k; /* p as privileged */
tsb->data.v = t->p;
110,16 → 123,22
 
/** Copy software PTE to DTSB.
*
* @param t Software PTE.
* @param ro If true, the mapping is copied read-only.
* @param t Software PTE.
* @param index Zero if lower 8K-subpage, one if higher 8K-subpage.
* @param ro If true, the mapping is copied read-only.
*/
void dtsb_pte_copy(pte_t *t, bool ro)
void dtsb_pte_copy(pte_t *t, index_t index, bool ro)
{
as_t *as;
tsb_entry_t *tsb;
index_t entry;
ASSERT(index <= 1);
 
as = t->as;
tsb = &as->arch.dtsb[(t->page >> PAGE_WIDTH) & TSB_INDEX_MASK];
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
ASSERT(entry < DTSB_ENTRY_COUNT);
tsb = &as->arch.dtsb[entry];
 
/*
* We use write barriers to make sure that the TSB load
134,10 → 153,11
write_barrier();
 
tsb->tag.context = as->asid;
/* the shift is bigger than PAGE_WIDTH, do not bother with index */
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = t->frame >> FRAME_WIDTH;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.cp = t->c;
#ifdef CONFIG_VIRT_IDX_DCACHE
tsb->data.cv = t->c;
148,7 → 168,7
write_barrier();
tsb->tag.invalid = true; /* mark the entry as valid */
tsb->tag.invalid = false; /* mark the entry as valid */
}
 
/** @}
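The new index parameter threaded through itsb_pte_copy() and dtsb_pte_copy() reflects the 16K emulated page being backed by two 8K MMU subpages; correspondingly, tsb_invalidate() above clears pages * 2 entries. A standalone model of the address arithmetic follows. It is illustrative only: the constants are assumptions, and MMU_FRAME_WIDTH is taken to equal MMU_PAGE_WIDTH (both 8K).

#include <stdint.h>
#include <stdio.h>

#define MMU_PAGE_WIDTH  13   /* 8K hardware page (assumed) */
#define PAGE_WIDTH      14   /* 16K emulated page (assumed) */

/* illustrative stand-in for itsb_pte_copy()/dtsb_pte_copy() */
static void tsb_pte_copy_sketch(uintptr_t page, uintptr_t frame,
    unsigned int index)
{
    uintptr_t va  = page + ((uintptr_t) index << MMU_PAGE_WIDTH);
    uintptr_t pfn = (frame >> MMU_PAGE_WIDTH) + index;

    printf("install va %#lx -> pfn %#lx\n",
        (unsigned long) va, (unsigned long) pfn);
}

int main(void)
{
    uintptr_t page = 1ul << PAGE_WIDTH;    /* one emulated 16K page */
    uintptr_t frame = 4ul << PAGE_WIDTH;
    unsigned int index;

    /* each emulated page contributes two 8K TSB entries */
    for (index = 0; index <= 1; index++)
        tsb_pte_copy_sketch(page, frame, index);
    return 0;
}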
/branches/fs/kernel/arch/sparc64/src/mm/frame.c
65,10 → 65,10
if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
zone_create(ADDR2PFN(start),
SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
confdata, 0);
SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
confdata, 0);
last_frame = max(last_frame, start + ALIGN_UP(size,
FRAME_SIZE));
FRAME_SIZE));
}
 
/*
/branches/fs/kernel/arch/sparc64/src/mm/page.c
73,9 → 73,9
*/
for (i = 0; i < bsp_locked_dtlb_entries; i++) {
dtlb_insert_mapping(bsp_locked_dtlb_entry[i].virt_page,
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
bsp_locked_dtlb_entry[i].phys_page,
bsp_locked_dtlb_entry[i].pagesize_code, true,
false);
}
#endif
 
107,26 → 107,26
size_t increment;
count_t count;
} sizemap[] = {
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * PAGE_SIZE, 2 } /* 8M */
{ PAGESIZE_8K, 0, 1 }, /* 8K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 2 }, /* 16K */
{ PAGESIZE_8K, MMU_PAGE_SIZE, 4 }, /* 32K */
{ PAGESIZE_64K, 0, 1}, /* 64K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 2 }, /* 128K */
{ PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 }, /* 256K */
{ PAGESIZE_512K, 0, 1 }, /* 512K */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 2 }, /* 1M */
{ PAGESIZE_512K, 64 * MMU_PAGE_SIZE, 4 }, /* 2M */
{ PAGESIZE_4M, 0, 1 }, /* 4M */
{ PAGESIZE_4M, 512 * MMU_PAGE_SIZE, 2 } /* 8M */
};
ASSERT(ALIGN_UP(physaddr, PAGE_SIZE) == physaddr);
ASSERT(ALIGN_UP(physaddr, MMU_PAGE_SIZE) == physaddr);
ASSERT(size <= 8 * 1024 * 1024);
if (size <= FRAME_SIZE)
if (size <= MMU_FRAME_SIZE)
order = 0;
else
order = (fnzb64(size - 1) + 1) - FRAME_WIDTH;
order = (fnzb64(size - 1) + 1) - MMU_FRAME_WIDTH;
 
/*
* Use virtual addresses that are beyond the limit of physical memory.
134,8 → 134,10
* by frame_alloc().
*/
ASSERT(PA2KA(last_frame));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame), 1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size, 1 << (order + FRAME_WIDTH));
uintptr_t virtaddr = ALIGN_UP(PA2KA(last_frame),
1 << (order + FRAME_WIDTH));
last_frame = ALIGN_UP(KA2PA(virtaddr) + size,
1 << (order + FRAME_WIDTH));
for (i = 0; i < sizemap[order].count; i++) {
/*
142,8 → 144,8
* First, insert the mapping into DTLB.
*/
dtlb_insert_mapping(virtaddr + i * sizemap[order].increment,
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
physaddr + i * sizemap[order].increment,
sizemap[order].pagesize_code, true, false);
#ifdef CONFIG_SMP
/*
150,11 → 152,11
* Second, save the information about the mapping for APs.
*/
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].virt_page =
virtaddr + i * sizemap[order].increment;
virtaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].phys_page =
physaddr + i * sizemap[order].increment;
physaddr + i * sizemap[order].increment;
bsp_locked_dtlb_entry[bsp_locked_dtlb_entries].pagesize_code =
sizemap[order].pagesize_code;
sizemap[order].pagesize_code;
bsp_locked_dtlb_entries++;
#endif
}
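The sizemap table is easiest to read with a worked case. Mapping 256K of device registers gives order = (fnzb64(0x3ffff) + 1) - MMU_FRAME_WIDTH = 18 - 13 = 5 (assuming an 8K MMU frame), and sizemap[5] = { PAGESIZE_64K, 8 * MMU_PAGE_SIZE, 4 }: four locked 64K DTLB entries spaced 64K apart. An illustrative sketch of the resulting insertions, with a made-up device base:

#include <stdint.h>
#include <stdio.h>

#define MMU_PAGE_SIZE  (8 * 1024)   /* assumed 8K */

int main(void)
{
    /* the order-5 row of the table above: 256K = 4 x 64K entries */
    unsigned int count = 4;
    uintptr_t increment = 8 * MMU_PAGE_SIZE;   /* 64K */
    uintptr_t physaddr = 0x1f000000;           /* made-up device base */
    unsigned int i;

    for (i = 0; i < count; i++)
        printf("DTLB entry %u: phys %#lx, 64K page\n",
            i, (unsigned long) (physaddr + i * increment));
    return 0;
}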
/branches/fs/kernel/arch/ia64/include/types.h
70,6 → 70,7
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/ia64/src/ia64.c
176,5 → 176,11
#endif
}
 
void arch_reboot(void)
{
// TODO
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/ia64/src/mm/as.c
36,11 → 36,10
#include <arch/mm/asid.h>
#include <arch/mm/page.h>
#include <genarch/mm/as_ht.h>
#include <genarch/mm/page_ht.h>
#include <genarch/mm/asid_fifo.h>
#include <mm/asid.h>
#include <arch.h>
#include <arch/barrier.h>
#include <synch/spinlock.h>
 
/** Architecture dependent address space init. */
void as_arch_init(void)
55,13 → 54,9
*/
void as_install_arch(as_t *as)
{
ipl_t ipl;
region_register rr;
int i;
ipl = interrupts_disable();
spinlock_lock(&as->lock);
ASSERT(as->asid != ASID_INVALID);
/*
80,9 → 75,6
}
srlz_d();
srlz_i();
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
 
/** @}
/branches/fs/kernel/arch/arm32/include/types.h
62,6 → 62,7
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/arm32/src/arm32.c
82,5 → 82,11
/* TODO */
}
 
void arch_reboot(void)
{
// TODO
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/ppc32/include/types.h
62,6 → 62,7
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/ppc32/src/mm/as.c
54,12 → 54,8
void as_install_arch(as_t *as)
{
asid_t asid;
ipl_t ipl;
uint32_t sr;
 
ipl = interrupts_disable();
spinlock_lock(&as->lock);
asid = as->asid;
/* Lower 2 GB, user and supervisor access */
79,9 → 75,6
: "r" ((0x4000 << 16) + (asid << 4) + sr), "r" (sr << 28)
);
}
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
 
/** @}
/branches/fs/kernel/arch/ppc32/src/mm/page.c
53,7 → 53,7
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
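This hunk, like the matching ones in the amd64, ia32, ia32xen and ppc64 files below, adds PAGE_WRITE to kernel mappings that previously relied on write access being implied. A minimal sketch of the convention; the flag values here are illustrative, not HelenOS's:

#include <stdio.h>

#define PAGE_CACHEABLE      (1 << 0)   /* illustrative bit assignments */
#define PAGE_NOT_CACHEABLE  (1 << 1)
#define PAGE_WRITE          (1 << 2)
#define PAGE_GLOBAL         (1 << 3)

int main(void)
{
    /* device window: uncached and, now explicitly, writable */
    int hw_flags = PAGE_NOT_CACHEABLE | PAGE_WRITE;
    /* kernel identity mapping: cached, writable, global */
    int identity_flags = PAGE_CACHEABLE | PAGE_WRITE | PAGE_GLOBAL;

    printf("hw=%#x identity=%#x\n", hw_flags, identity_flags);
    return 0;
}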
/branches/fs/kernel/arch/ppc32/src/interrupt.c
60,11 → 60,19
int inum;
while ((inum = pic_get_pending()) != -1) {
bool ack = false;
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Acknowledge the interrupt before processing */
pic_ack_interrupt(inum);
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
75,7 → 83,9
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
pic_ack_interrupt(inum);
if (!ack)
pic_ack_interrupt(inum);
}
}
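The same preack logic recurs in the ia32xen, amd64, ia32 and ppc64 handlers below: a per-IRQ flag decides whether the controller is acknowledged before or after the handler runs, and the trailing acknowledgement is skipped when it already happened. A compressed, runnable model of the control flow; all names are stand-ins rather than the HelenOS API:

#include <stdbool.h>
#include <stdio.h>

typedef struct irq_sketch {
    bool preack;
    void (*handler)(struct irq_sketch *);
} irq_sketch_t;

static void pic_ack_sketch(int inum)
{
    printf("ack %d\n", inum);
}

static void clock_handler_sketch(irq_sketch_t *irq)
{
    (void) irq;
    printf("tick\n");
}

static void dispatch_sketch(irq_sketch_t *irq, int inum)
{
    bool ack = false;

    if (irq) {
        if (irq->preack) {
            /* acknowledge before the handler, e.g. so a clock
               handler cannot lose ticks while it runs */
            pic_ack_sketch(inum);
            ack = true;
        }
        irq->handler(irq);
    }
    if (!ack)
        pic_ack_sketch(inum);   /* default: acknowledge afterwards */
}

int main(void)
{
    irq_sketch_t clk = { .preack = true, .handler = clock_handler_sketch };

    dispatch_sketch(&clk, 0);
    return 0;
}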
 
/branches/fs/kernel/arch/ppc32/src/drivers/cuda.c
48,7 → 48,8
#define PACKET_ADB 0x00
#define PACKET_CUDA 0x01
 
#define CUDA_POWERDOWN 0x0a
#define CUDA_POWERDOWN 0x0a
#define CUDA_RESET 0x11
 
#define RS 0x200
#define B (0 * RS)
191,9 → 192,6
};
 
 
void send_packet(const uint8_t kind, index_t count, ...);
 
 
static void receive_packet(uint8_t *kind, index_t count, uint8_t data[])
{
cuda[B] = cuda[B] & ~TIP;
316,7 → 314,7
}
 
 
void send_packet(const uint8_t kind, index_t count, ...)
static void send_packet(const uint8_t kind, count_t count, ...)
{
index_t i;
va_list va;
341,13 → 339,17
 
 
void cpu_halt(void) {
#ifdef CONFIG_POWEROFF
send_packet(PACKET_CUDA, 1, CUDA_POWERDOWN);
#endif
asm volatile (
"b 0\n"
);
}
 
void arch_reboot(void) {
send_packet(PACKET_CUDA, 1, CUDA_RESET);
asm volatile (
"b 0\n"
);
}
 
/** @}
*/
/branches/fs/kernel/arch/ia32xen/include/types.h
62,6 → 62,7
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/ia32xen/src/ia32xen.c
211,5 → 211,11
{
}
 
void arch_reboot(void)
{
// TODO
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/ia32xen/src/smp/smp.c
80,13 → 80,13
 
if (config.cpu_count > 1) {
page_mapping_insert(AS_KERNEL, l_apic_address, (uintptr_t) l_apic,
PAGE_NOT_CACHEABLE);
PAGE_NOT_CACHEABLE | PAGE_WRITE);
page_mapping_insert(AS_KERNEL, io_apic_address, (uintptr_t) io_apic,
PAGE_NOT_CACHEABLE);
PAGE_NOT_CACHEABLE | PAGE_WRITE);
l_apic = (uint32_t *) l_apic_address;
io_apic = (uint32_t *) io_apic_address;
}
}
}
 
/*
/branches/fs/kernel/arch/ia32xen/src/interrupt.c
176,14 → 176,21
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
bool ack = false;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
 
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
trap_virtual_eoi();
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
194,7 → 201,9
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
trap_virtual_eoi();
if (!ack)
trap_virtual_eoi();
}
 
void interrupt_init(void)
/branches/fs/kernel/arch/amd64/include/types.h
62,6 → 62,7
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/amd64/include/asm.h
55,10 → 55,17
return v;
}
 
static inline void cpu_sleep(void) { __asm__ volatile ("hlt\n"); };
static inline void cpu_halt(void) { __asm__ volatile ("hlt\n"); };
static inline void cpu_sleep(void)
{
asm volatile ("hlt\n");
};
 
static inline void cpu_halt(void)
{
asm volatile ("hlt\n");
};
 
 
/** Byte from port
*
* Get byte from port
/branches/fs/kernel/arch/amd64/src/pm.c
33,6 → 33,7
/** @file
*/
 
#include <arch.h>
#include <arch/pm.h>
#include <arch/asm.h>
#include <mm/as.h>
227,5 → 228,24
tr_load(gdtselector(TSS_DES));
}
 
/* Reboot the machine by initiating
 * a triple fault: with the IDT cleared, the int $3 below cannot be
 * dispatched, the ensuing double fault cannot be dispatched either,
 * and the resulting processor shutdown resets the machine.
 */
void arch_reboot(void)
{
preemption_disable();
ipl_t ipl = interrupts_disable();
memsetb((uintptr_t) idt, sizeof(idt), 0);
idtr_load(&idtr);
interrupts_restore(ipl);
asm volatile (
"int $0x03\n"
"cli\n"
"hlt\n"
);
}
 
/** @}
*/
/branches/fs/kernel/arch/amd64/src/boot/vga323.pal
0,0 → 1,0
link ../../../ia32/src/boot/vga323.pal
Property changes:
Added: svn:special
+*
\ No newline at end of property
/branches/fs/kernel/arch/amd64/src/boot/boot.S
72,8 → 72,26
movl %eax, grub_eax # save parameters from GRUB
movl %ebx, grub_ebx
# Protected 32-bit. We want to reuse the code-seg descriptor,
# the Default operand size must not be 1 when entering long mode
movl $0x80000000, %eax
cpuid
cmp $0x80000000, %eax # any function > 80000000h?
jbe long_mode_unsupported
movl $(AMD_CPUID_EXTENDED), %eax # Extended function code 80000001
cpuid
bt $29, %edx # Test if long mode is supported.
jc long_mode_supported
 
long_mode_unsupported:
movl $long_mode_msg, %esi
jmp error_halt
long_mode_supported:
#ifdef CONFIG_FB
mov $vesa_init, %esi;
mov $vesa_init, %esi
mov $VESA_INIT_SEGMENT << 4, %edi
mov $e_vesa_init - vesa_init, %ecx
cld
92,25 → 110,7
shr $16, %ebx
mov %bx, KA2PA(vesa_bpp)
#endif
 
# Protected 32-bit. We want to reuse the code-seg descriptor,
# the Default operand size must not be 1 when entering long mode
movl $0x80000000, %eax
cpuid
cmp $0x80000000, %eax # any function > 80000000h?
jbe long_mode_unsupported
movl $(AMD_CPUID_EXTENDED), %eax # Extended function code 80000001
cpuid
bt $29, %edx # Test if long mode is supported.
jc long_mode_supported
 
long_mode_unsupported:
cli
hlt
long_mode_supported:
# Enable 64-bit page translation entries - CR4.PAE = 1.
# Paging is not enabled until after long mode is enabled
303,11 → 303,12
#define VESA_INFO_SIZE 1024
 
#define VESA_MODE_ATTRIBUTES_OFFSET 0
#define VESA_MODE_LIST_PTR_OFFSET 14
#define VESA_MODE_SCANLINE_OFFSET 16
#define VESA_MODE_WIDTH_OFFSET 18
#define VESA_MODE_HEIGHT_OFFSET 20
#define VESA_MODE_BPP_OFFSET 25
#define VESA_MODE_SCANLINE_OFFSET 16
#define VESA_MODE_PHADDR_OFFSET 40
 
#define VESA_END_OF_MODES 0xffff
317,14 → 318,14
#define VESA_GET_INFO 0x4f00
#define VESA_GET_MODE_INFO 0x4f01
#define VESA_SET_MODE 0x4f02
#define VESA_SET_PALETTE 0x4f09
 
#define CONFIG_VESA_BPP_a 255
 
#if CONFIG_VESA_BPP == 24
#undef CONFIG_VESA_BPP_a
#define CONFIG_VESA_BPP_a 32
#define CONFIG_VESA_BPP_VARIANT 32
#endif
 
mov $VESA_GET_INFO, %ax
mov $e_vesa_init - vesa_init, %di
push %di
339,7 → 340,7
mov VESA_MODE_LIST_PTR_OFFSET(%di), %si
add $VESA_INFO_SIZE, %di
 
1:# Try next mode
mov %gs:(%si), %cx
cmp $VESA_END_OF_MODES, %cx
369,10 → 370,13
mov $CONFIG_VESA_BPP, %al
cmp VESA_MODE_BPP_OFFSET(%di), %al
 
#ifdef CONFIG_VESA_BPP_VARIANT
jz 2f
mov $CONFIG_VESA_BPP_a, %al
mov $CONFIG_VESA_BPP_VARIANT, %al
cmp VESA_MODE_BPP_OFFSET(%di), %al
#endif
jnz 1b
2:
386,7 → 390,60
pop %di
cmp $VESA_OK, %al
jnz 0f
 
#if CONFIG_VESA_BPP == 8
# Set 3:2:3 VGA palette
mov VESA_MODE_ATTRIBUTES_OFFSET(%di), %ax
push %di
mov $vga323 - vesa_init, %di
mov $0x100, %ecx
bt $5, %ax # Test if VGA compatible registers are present
jnc vga_compat
# Try VESA routine to set palette
mov $VESA_SET_PALETTE, %ax
xor %bl, %bl
xor %dx, %dx
int $0x10
jmp vga_not_compat
vga_compat:
# Try VGA registers to set palette
movw $0x3c6, %dx # Set palette mask
movb $0xff, %al
outb %al, %dx
movw $0x3c8, %dx # First index to set
xor %al, %al
outb %al, %dx
movw $0x3c9, %dx # Data port
vga_loop:
movb %es:2(%di), %al
outb %al, %dx
movb %es:1(%di), %al
outb %al, %dx
movb %es:(%di), %al
outb %al, %dx
addw $4, %di
loop vga_loop
vga_not_compat:
pop %di
#endif
mov VESA_MODE_PHADDR_OFFSET(%di), %esi
mov VESA_MODE_WIDTH_OFFSET(%di), %ax
shl $16, %eax
427,8 → 484,10
mov $0xffffffff, %edi # EGA text mode used, because of problems with VESA
xor %ax, %ax
jz 8b # Force relative jump
 
vga323:
#include "vga323.pal"
.code32
vesa_init_protect:
movw $gdtselector(KDATA_DES), %cx
441,11 → 500,73
movw %cx, %fs
movw %cx, %gs
movl $START_STACK, %esp # initialize stack pointer
jmpl $gdtselector(KTEXT32_DES), $vesa_meeting_point
.align 4
e_vesa_init:
#endif
#endif
 
# Print string from %esi to EGA display (in red) and halt
error_halt:
movl $0xb8000, %edi # base of EGA text mode memory
xorl %eax, %eax
movw $0x3d4, %dx # read bits 8 - 15 of the cursor address
movb $0xe, %al
outb %al, %dx
movw $0x3d5, %dx
inb %dx, %al
shl $8, %ax
movw $0x3d4, %dx # read bits 0 - 7 of the cursor address
movb $0xf, %al
outb %al, %dx
movw $0x3d5, %dx
inb %dx, %al
cmp $1920, %ax
jbe cursor_ok
movw $1920, %ax # sanity check for the cursor on the last line
cursor_ok:
movw %ax, %bx
shl $1, %eax
addl %eax, %edi
movw $0x0c00, %ax # black background, light red foreground
cld
ploop:
lodsb
cmp $0, %al
je ploop_end
stosw
inc %bx
jmp ploop
ploop_end:
movw $0x3d4, %dx # write bits 8 - 15 of the cursor address
movb $0xe, %al
outb %al, %dx
movw $0x3d5, %dx
movb %bh, %al
outb %al, %dx
movw $0x3d4, %dx # write bits 0 - 7 of the cursor address
movb $0xf, %al
outb %al, %dx
movw $0x3d5, %dx
movb %bl, %al
outb %al, %dx
cli
hlt
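In C, error_halt (this copy and the identical ia32 one later in this revision) amounts to the following. This is an illustrative model only: a local array stands in for the EGA text buffer at 0xb8000, and stubs stand in for the inb/outb port accesses to the CRTC cursor registers.

#include <stdint.h>

static uint16_t video_sketch[2000];   /* stands in for 0xb8000 */

/* stand-ins for the real port I/O instructions */
static uint8_t inb_sketch(uint16_t port) { (void) port; return 0; }
static void outb_sketch(uint16_t port, uint8_t val)
{
    (void) port;
    (void) val;
}

static void error_print_sketch(const char *msg)
{
    uint16_t cursor;

    outb_sketch(0x3d4, 0x0e);              /* cursor address, bits 8 - 15 */
    cursor = (uint16_t) (inb_sketch(0x3d5) << 8);
    outb_sketch(0x3d4, 0x0f);              /* cursor address, bits 0 - 7 */
    cursor |= inb_sketch(0x3d5);
    if (cursor > 1920)
        cursor = 1920;                     /* keep it on the last line */

    while (*msg)                           /* black bg, light red fg */
        video_sketch[cursor++] = (uint16_t) (0x0c00 | (uint8_t) *msg++);

    outb_sketch(0x3d4, 0x0e);              /* write the cursor back */
    outb_sketch(0x3d5, (uint8_t) (cursor >> 8));
    outb_sketch(0x3d4, 0x0f);
    outb_sketch(0x3d5, (uint8_t) (cursor & 0xff));
}

int main(void)
{
    error_print_sketch("64 bit long mode not supported. System halted.");
    return 0;
}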
.section K_DATA_START, "aw", @progbits
.align 4096
513,3 → 634,6
 
grub_ebx:
.long 0
 
long_mode_msg:
.asciz "64 bit long mode not supported. System halted."
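The long mode probe added above, rendered in C for readability. Illustrative only: it assumes AMD_CPUID_EXTENDED is 0x80000001, matching the extended function the assembly queries, and it requires an x86 compiler.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool long_mode_supported_sketch(void)
{
    uint32_t eax, ebx, ecx, edx;

    /* does the CPU implement any extended functions at all? */
    asm volatile ("cpuid"
        : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
        : "a" (0x80000000u));
    if (eax <= 0x80000000u)
        return false;

    /* extended function 0x80000001: long mode is EDX bit 29 */
    asm volatile ("cpuid"
        : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
        : "a" (0x80000001u));
    return (edx >> 29) & 1;
}

int main(void)
{
    printf("long mode: %s\n",
        long_mode_supported_sketch() ? "yes" : "no");
    return 0;
}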
/branches/fs/kernel/arch/amd64/src/mm/page.c
83,7 → 83,7
{
uintptr_t cur;
int i;
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL;
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE;
 
if (config.cpu_active == 1) {
page_mapping_operations = &pt_mapping_operations;
112,10 → 112,8
 
exc_register(14, "page_fault", (iroutine) page_fault);
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
else {
} else
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
}
 
 
208,7 → 206,7
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
/branches/fs/kernel/arch/amd64/src/interrupt.c
156,14 → 156,21
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
bool ack = false;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
 
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
trap_virtual_eoi();
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
174,7 → 181,9
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
trap_virtual_eoi();
if (!ack)
trap_virtual_eoi();
}
 
void interrupt_init(void)
/branches/fs/kernel/arch/ppc64/include/types.h
62,6 → 62,7
typedef int64_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/ppc64/src/mm/as.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ppc64mm
/** @addtogroup ppc64mm
* @{
*/
/** @file
41,6 → 41,6
as_operations = &as_pt_operations;
}
 
/** @}
/** @}
*/
 
/branches/fs/kernel/arch/ppc64/src/mm/page.c
264,10 → 264,8
uintptr_t cur;
int flags;
/* Frames below 128 MB are mapped using BAT;
map the rest of the physical memory */
for (cur = 128 << 20; cur < last_frame; cur += FRAME_SIZE) {
flags = PAGE_CACHEABLE;
flags = PAGE_CACHEABLE | PAGE_WRITE;
if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
flags |= PAGE_GLOBAL;
page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
296,7 → 294,7
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
/branches/fs/kernel/arch/ppc64/src/interrupt.c
60,11 → 60,19
int inum;
while ((inum = pic_get_pending()) != -1) {
bool ack = false;
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Acknowledge the interrupt before processing */
pic_ack_interrupt(inum);
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
75,7 → 83,9
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
pic_ack_interrupt(inum);
if (!ack)
pic_ack_interrupt(inum);
}
}
 
/branches/fs/kernel/arch/ppc64/src/ppc64.c
128,6 → 128,7
void arch_grab_console(void)
{
}
 
/** Return console to userspace
*
*/
135,5 → 136,11
{
}
 
void arch_reboot(void)
{
// TODO
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/mips32/_link.ld.in
10,7 → 10,7
 
OUTPUT_ARCH(mips)
 
ENTRY(kernel_image_start)
ENTRY(kernel_image_start)
 
SECTIONS {
. = KERNEL_LOAD_ADDRESS;
31,9 → 31,10
*(.rodata*);
*(.sdata);
*(.reginfo);
/* Unfortunately IRIX does not allow us
* to include this as a last section :-(
* BSS/SBSS addresses will be wrong */
*(.sbss);
*(.scommon);
*(.bss); /* uninitialized static variables */
*(COMMON); /* global variables */
symbol_table = .;
*(symtab.*);
}
40,14 → 41,6
_gp = . + 0x8000;
.lit8 : { *(.lit8) }
.lit4 : { *(.lit4) }
.sbss : {
*(.sbss);
*(.scommon);
}
.bss : {
*(.bss); /* uninitialized static variables */
*(COMMON); /* global variables */
}
 
kdata_end = .;
 
/branches/fs/kernel/arch/mips32/include/types.h
62,6 → 62,7
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/mips32/src/mips32.c
178,5 → 178,11
return 0;
}
 
void arch_reboot(void)
{
___halt();
while (1);
}
 
/** @}
*/
/branches/fs/kernel/arch/mips32/src/mm/as.c
34,12 → 34,12
 
#include <arch/mm/as.h>
#include <genarch/mm/as_pt.h>
#include <genarch/mm/page_pt.h>
#include <genarch/mm/asid_fifo.h>
#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <arch/cp0.h>
#include <arch.h>
 
/** Architecture dependent address space init. */
void as_arch_init(void)
57,7 → 57,6
void as_install_arch(as_t *as)
{
entry_hi_t hi;
ipl_t ipl;
 
/*
* Install ASID.
64,12 → 63,8
*/
hi.value = cp0_entry_hi_read();
 
ipl = interrupts_disable();
spinlock_lock(&as->lock);
hi.asid = as->asid;
cp0_entry_hi_write(hi.value);
spinlock_unlock(&as->lock);
interrupts_restore(ipl);
}
 
/** @}
/branches/fs/kernel/arch/ia32/include/cpuid.h
49,13 → 49,11
unsigned : 31;
} __attribute__ ((packed));
 
typedef union cpuid_extended_feature_info
{
typedef union cpuid_extended_feature_info {
struct __cpuid_extended_feature_info bits;
uint32_t word;
}cpuid_extended_feature_info;
uint32_t word;
} cpuid_extended_feature_info;
 
 
struct __cpuid_feature_info {
unsigned : 23;
unsigned mmx : 1;
65,11 → 63,10
unsigned : 5;
} __attribute__ ((packed));
 
typedef union cpuid_feature_info
{
typedef union cpuid_feature_info {
struct __cpuid_feature_info bits;
uint32_t word ;
}cpuid_feature_info;
uint32_t word;
} cpuid_feature_info;
 
 
static inline uint32_t has_cpuid(void)
100,16 → 97,9
static inline void cpuid(uint32_t cmd, cpu_info_t *info)
{
asm volatile (
"movl %4, %%eax\n"
"cpuid\n"
"movl %%eax, %0\n"
"movl %%ebx, %1\n"
"movl %%ecx, %2\n"
"movl %%edx, %3\n"
: "=m" (info->cpuid_eax), "=m" (info->cpuid_ebx), "=m" (info->cpuid_ecx), "=m" (info->cpuid_edx)
: "m" (cmd)
: "eax", "ebx", "ecx", "edx"
: "=a" (info->cpuid_eax), "=b" (info->cpuid_ebx), "=c" (info->cpuid_ecx), "=d" (info->cpuid_edx)
: "a" (cmd)
);
}
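The rewritten constraints let the compiler place cmd into %eax itself and read the results straight from the output registers, instead of shuffling through memory operands guarded by a clobber list. A standalone equivalent, with a hypothetical struct name:

#include <stdint.h>
#include <stdio.h>

typedef struct {
    uint32_t eax, ebx, ecx, edx;
} cpuid_regs_sketch_t;

static inline void cpuid_sketch(uint32_t cmd, cpuid_regs_sketch_t *info)
{
    asm volatile (
        "cpuid\n"
        : "=a" (info->eax), "=b" (info->ebx),
          "=c" (info->ecx), "=d" (info->edx)
        : "a" (cmd)
    );
}

int main(void)
{
    cpuid_regs_sketch_t r;

    cpuid_sketch(0, &r);   /* basic function 0 */
    printf("max basic function: %u\n", r.eax);
    return 0;
}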
 
/branches/fs/kernel/arch/ia32/include/types.h
62,6 → 62,7
typedef int32_t native_t;
 
typedef uint8_t bool;
typedef uint64_t thread_id_t;
typedef uint64_t task_id_t;
typedef uint32_t context_id_t;
 
/branches/fs/kernel/arch/ia32/include/asm.h
59,12 → 59,12
*/
static inline void cpu_halt(void)
{
asm("hlt\n");
asm volatile ("hlt\n");
};
 
static inline void cpu_sleep(void)
{
asm("hlt\n");
asm volatile ("hlt\n");
};
 
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
/branches/fs/kernel/arch/ia32/src/pm.c
121,7 → 121,7
void idt_init(void)
{
idescriptor_t *d;
int i;
unsigned int i;
 
for (i = 0; i < IDT_ITEMS; i++) {
d = &idt[i];
230,5 → 230,28
gdtr_load(&cpugdtr);
}
 
/* Reboot the machine by initiating
 * a triple fault: with the IDT cleared, the int $3 below cannot be
 * dispatched, the ensuing double fault cannot be dispatched either,
 * and the resulting processor shutdown resets the machine.
 */
void arch_reboot(void)
{
preemption_disable();
ipl_t ipl = interrupts_disable();
memsetb((uintptr_t) idt, sizeof(idt), 0);
ptr_16_32_t idtr;
idtr.limit = sizeof(idt);
idtr.base = (uintptr_t) idt;
idtr_load(&idtr);
interrupts_restore(ipl);
asm volatile (
"int $0x03\n"
"cli\n"
"hlt\n"
);
}
 
/** @}
*/
/branches/fs/kernel/arch/ia32/src/smp/smp.c
82,13 → 82,13
 
if (config.cpu_count > 1) {
page_mapping_insert(AS_KERNEL, l_apic_address, (uintptr_t) l_apic,
PAGE_NOT_CACHEABLE);
PAGE_NOT_CACHEABLE | PAGE_WRITE);
page_mapping_insert(AS_KERNEL, io_apic_address, (uintptr_t) io_apic,
PAGE_NOT_CACHEABLE);
PAGE_NOT_CACHEABLE | PAGE_WRITE);
l_apic = (uint32_t *) l_apic_address;
io_apic = (uint32_t *) io_apic_address;
}
}
}
 
/*
/branches/fs/kernel/arch/ia32/src/smp/apic.c
139,7 → 139,14
 
static void l_apic_timer_irq_handler(irq_t *irq, void *arg, ...)
{
/*
* Holding a spinlock could prevent clock() from preempting
* the current thread. In this case, we don't need to hold the
* irq->lock so we just unlock it and then lock it again.
*/
spinlock_unlock(&irq->lock);
clock();
spinlock_lock(&irq->lock);
}
 
/** Initialize APIC on BSP. */
162,6 → 169,7
io_apic_disable_irqs(0xffff);
irq_initialize(&l_apic_timer_irq);
l_apic_timer_irq.preack = true;
l_apic_timer_irq.devno = device_assign_devno();
l_apic_timer_irq.inr = IRQ_CLK;
l_apic_timer_irq.claim = l_apic_timer_claim;
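The unlock/relock in l_apic_timer_irq_handler is worth a model: clock() may want to preempt the running thread, which it cannot do while this CPU holds a spinlock, so the handler drops irq->lock around the call and restores it for the dispatcher. Illustrative sketch; the lock primitives are stand-ins for the HelenOS ones:

#include <stdio.h>

typedef struct { int locked; } spinlock_sketch_t;

static void spinlock_lock_sketch(spinlock_sketch_t *l)   { l->locked = 1; }
static void spinlock_unlock_sketch(spinlock_sketch_t *l) { l->locked = 0; }

static void clock_sketch(void)
{
    /* may decide to preempt the running thread, which requires that
       this CPU holds no spinlocks */
    printf("clock tick\n");
}

/* shape of l_apic_timer_irq_handler: the dispatcher holds irq->lock */
static void timer_irq_handler_sketch(spinlock_sketch_t *irq_lock)
{
    spinlock_unlock_sketch(irq_lock);   /* allow preemption in clock() */
    clock_sketch();
    spinlock_lock_sketch(irq_lock);     /* restore the caller's invariant */
}

int main(void)
{
    spinlock_sketch_t lock = { 1 };     /* as acquired by the dispatcher */

    timer_irq_handler_sketch(&lock);
    return 0;
}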
/branches/fs/kernel/arch/ia32/src/boot/vga323.pal
0,0 → 1,256
.byte 0x00, 0x00, 0x00, 0x00
.byte 0x09, 0x00, 0x00, 0x00
.byte 0x12, 0x00, 0x00, 0x00
.byte 0x1b, 0x00, 0x00, 0x00
.byte 0x24, 0x00, 0x00, 0x00
.byte 0x2d, 0x00, 0x00, 0x00
.byte 0x36, 0x00, 0x00, 0x00
.byte 0x3f, 0x00, 0x00, 0x00
.byte 0x00, 0x15, 0x00, 0x00
.byte 0x09, 0x15, 0x00, 0x00
.byte 0x12, 0x15, 0x00, 0x00
.byte 0x1b, 0x15, 0x00, 0x00
.byte 0x24, 0x15, 0x00, 0x00
.byte 0x2d, 0x15, 0x00, 0x00
.byte 0x36, 0x15, 0x00, 0x00
.byte 0x3f, 0x15, 0x00, 0x00
.byte 0x00, 0x2a, 0x00, 0x00
.byte 0x09, 0x2a, 0x00, 0x00
.byte 0x12, 0x2a, 0x00, 0x00
.byte 0x1b, 0x2a, 0x00, 0x00
.byte 0x24, 0x2a, 0x00, 0x00
.byte 0x2d, 0x2a, 0x00, 0x00
.byte 0x36, 0x2a, 0x00, 0x00
.byte 0x3f, 0x2a, 0x00, 0x00
.byte 0x00, 0x3f, 0x00, 0x00
.byte 0x09, 0x3f, 0x00, 0x00
.byte 0x12, 0x3f, 0x00, 0x00
.byte 0x1b, 0x3f, 0x00, 0x00
.byte 0x24, 0x3f, 0x00, 0x00
.byte 0x2d, 0x3f, 0x00, 0x00
.byte 0x36, 0x3f, 0x00, 0x00
.byte 0x3f, 0x3f, 0x00, 0x00
.byte 0x00, 0x00, 0x09, 0x00
.byte 0x09, 0x00, 0x09, 0x00
.byte 0x12, 0x00, 0x09, 0x00
.byte 0x1b, 0x00, 0x09, 0x00
.byte 0x24, 0x00, 0x09, 0x00
.byte 0x2d, 0x00, 0x09, 0x00
.byte 0x36, 0x00, 0x09, 0x00
.byte 0x3f, 0x00, 0x09, 0x00
.byte 0x00, 0x15, 0x09, 0x00
.byte 0x09, 0x15, 0x09, 0x00
.byte 0x12, 0x15, 0x09, 0x00
.byte 0x1b, 0x15, 0x09, 0x00
.byte 0x24, 0x15, 0x09, 0x00
.byte 0x2d, 0x15, 0x09, 0x00
.byte 0x36, 0x15, 0x09, 0x00
.byte 0x3f, 0x15, 0x09, 0x00
.byte 0x00, 0x2a, 0x09, 0x00
.byte 0x09, 0x2a, 0x09, 0x00
.byte 0x12, 0x2a, 0x09, 0x00
.byte 0x1b, 0x2a, 0x09, 0x00
.byte 0x24, 0x2a, 0x09, 0x00
.byte 0x2d, 0x2a, 0x09, 0x00
.byte 0x36, 0x2a, 0x09, 0x00
.byte 0x3f, 0x2a, 0x09, 0x00
.byte 0x00, 0x3f, 0x09, 0x00
.byte 0x09, 0x3f, 0x09, 0x00
.byte 0x12, 0x3f, 0x09, 0x00
.byte 0x1b, 0x3f, 0x09, 0x00
.byte 0x24, 0x3f, 0x09, 0x00
.byte 0x2d, 0x3f, 0x09, 0x00
.byte 0x36, 0x3f, 0x09, 0x00
.byte 0x3f, 0x3f, 0x09, 0x00
.byte 0x00, 0x00, 0x12, 0x00
.byte 0x09, 0x00, 0x12, 0x00
.byte 0x12, 0x00, 0x12, 0x00
.byte 0x1b, 0x00, 0x12, 0x00
.byte 0x24, 0x00, 0x12, 0x00
.byte 0x2d, 0x00, 0x12, 0x00
.byte 0x36, 0x00, 0x12, 0x00
.byte 0x3f, 0x00, 0x12, 0x00
.byte 0x00, 0x15, 0x12, 0x00
.byte 0x09, 0x15, 0x12, 0x00
.byte 0x12, 0x15, 0x12, 0x00
.byte 0x1b, 0x15, 0x12, 0x00
.byte 0x24, 0x15, 0x12, 0x00
.byte 0x2d, 0x15, 0x12, 0x00
.byte 0x36, 0x15, 0x12, 0x00
.byte 0x3f, 0x15, 0x12, 0x00
.byte 0x00, 0x2a, 0x12, 0x00
.byte 0x09, 0x2a, 0x12, 0x00
.byte 0x12, 0x2a, 0x12, 0x00
.byte 0x1b, 0x2a, 0x12, 0x00
.byte 0x24, 0x2a, 0x12, 0x00
.byte 0x2d, 0x2a, 0x12, 0x00
.byte 0x36, 0x2a, 0x12, 0x00
.byte 0x3f, 0x2a, 0x12, 0x00
.byte 0x00, 0x3f, 0x12, 0x00
.byte 0x09, 0x3f, 0x12, 0x00
.byte 0x12, 0x3f, 0x12, 0x00
.byte 0x1b, 0x3f, 0x12, 0x00
.byte 0x24, 0x3f, 0x12, 0x00
.byte 0x2d, 0x3f, 0x12, 0x00
.byte 0x36, 0x3f, 0x12, 0x00
.byte 0x3f, 0x3f, 0x12, 0x00
.byte 0x00, 0x00, 0x1b, 0x00
.byte 0x09, 0x00, 0x1b, 0x00
.byte 0x12, 0x00, 0x1b, 0x00
.byte 0x1b, 0x00, 0x1b, 0x00
.byte 0x24, 0x00, 0x1b, 0x00
.byte 0x2d, 0x00, 0x1b, 0x00
.byte 0x36, 0x00, 0x1b, 0x00
.byte 0x3f, 0x00, 0x1b, 0x00
.byte 0x00, 0x15, 0x1b, 0x00
.byte 0x09, 0x15, 0x1b, 0x00
.byte 0x12, 0x15, 0x1b, 0x00
.byte 0x1b, 0x15, 0x1b, 0x00
.byte 0x24, 0x15, 0x1b, 0x00
.byte 0x2d, 0x15, 0x1b, 0x00
.byte 0x36, 0x15, 0x1b, 0x00
.byte 0x3f, 0x15, 0x1b, 0x00
.byte 0x00, 0x2a, 0x1b, 0x00
.byte 0x09, 0x2a, 0x1b, 0x00
.byte 0x12, 0x2a, 0x1b, 0x00
.byte 0x1b, 0x2a, 0x1b, 0x00
.byte 0x24, 0x2a, 0x1b, 0x00
.byte 0x2d, 0x2a, 0x1b, 0x00
.byte 0x36, 0x2a, 0x1b, 0x00
.byte 0x3f, 0x2a, 0x1b, 0x00
.byte 0x00, 0x3f, 0x1b, 0x00
.byte 0x09, 0x3f, 0x1b, 0x00
.byte 0x12, 0x3f, 0x1b, 0x00
.byte 0x1b, 0x3f, 0x1b, 0x00
.byte 0x24, 0x3f, 0x1b, 0x00
.byte 0x2d, 0x3f, 0x1b, 0x00
.byte 0x36, 0x3f, 0x1b, 0x00
.byte 0x3f, 0x3f, 0x1b, 0x00
.byte 0x00, 0x00, 0x24, 0x00
.byte 0x09, 0x00, 0x24, 0x00
.byte 0x12, 0x00, 0x24, 0x00
.byte 0x1b, 0x00, 0x24, 0x00
.byte 0x24, 0x00, 0x24, 0x00
.byte 0x2d, 0x00, 0x24, 0x00
.byte 0x36, 0x00, 0x24, 0x00
.byte 0x3f, 0x00, 0x24, 0x00
.byte 0x00, 0x15, 0x24, 0x00
.byte 0x09, 0x15, 0x24, 0x00
.byte 0x12, 0x15, 0x24, 0x00
.byte 0x1b, 0x15, 0x24, 0x00
.byte 0x24, 0x15, 0x24, 0x00
.byte 0x2d, 0x15, 0x24, 0x00
.byte 0x36, 0x15, 0x24, 0x00
.byte 0x3f, 0x15, 0x24, 0x00
.byte 0x00, 0x2a, 0x24, 0x00
.byte 0x09, 0x2a, 0x24, 0x00
.byte 0x12, 0x2a, 0x24, 0x00
.byte 0x1b, 0x2a, 0x24, 0x00
.byte 0x24, 0x2a, 0x24, 0x00
.byte 0x2d, 0x2a, 0x24, 0x00
.byte 0x36, 0x2a, 0x24, 0x00
.byte 0x3f, 0x2a, 0x24, 0x00
.byte 0x00, 0x3f, 0x24, 0x00
.byte 0x09, 0x3f, 0x24, 0x00
.byte 0x12, 0x3f, 0x24, 0x00
.byte 0x1b, 0x3f, 0x24, 0x00
.byte 0x24, 0x3f, 0x24, 0x00
.byte 0x2d, 0x3f, 0x24, 0x00
.byte 0x36, 0x3f, 0x24, 0x00
.byte 0x3f, 0x3f, 0x24, 0x00
.byte 0x00, 0x00, 0x2d, 0x00
.byte 0x09, 0x00, 0x2d, 0x00
.byte 0x12, 0x00, 0x2d, 0x00
.byte 0x1b, 0x00, 0x2d, 0x00
.byte 0x24, 0x00, 0x2d, 0x00
.byte 0x2d, 0x00, 0x2d, 0x00
.byte 0x36, 0x00, 0x2d, 0x00
.byte 0x3f, 0x00, 0x2d, 0x00
.byte 0x00, 0x15, 0x2d, 0x00
.byte 0x09, 0x15, 0x2d, 0x00
.byte 0x12, 0x15, 0x2d, 0x00
.byte 0x1b, 0x15, 0x2d, 0x00
.byte 0x24, 0x15, 0x2d, 0x00
.byte 0x2d, 0x15, 0x2d, 0x00
.byte 0x36, 0x15, 0x2d, 0x00
.byte 0x3f, 0x15, 0x2d, 0x00
.byte 0x00, 0x2a, 0x2d, 0x00
.byte 0x09, 0x2a, 0x2d, 0x00
.byte 0x12, 0x2a, 0x2d, 0x00
.byte 0x1b, 0x2a, 0x2d, 0x00
.byte 0x24, 0x2a, 0x2d, 0x00
.byte 0x2d, 0x2a, 0x2d, 0x00
.byte 0x36, 0x2a, 0x2d, 0x00
.byte 0x3f, 0x2a, 0x2d, 0x00
.byte 0x00, 0x3f, 0x2d, 0x00
.byte 0x09, 0x3f, 0x2d, 0x00
.byte 0x12, 0x3f, 0x2d, 0x00
.byte 0x1b, 0x3f, 0x2d, 0x00
.byte 0x24, 0x3f, 0x2d, 0x00
.byte 0x2d, 0x3f, 0x2d, 0x00
.byte 0x36, 0x3f, 0x2d, 0x00
.byte 0x3f, 0x3f, 0x2d, 0x00
.byte 0x00, 0x00, 0x36, 0x00
.byte 0x09, 0x00, 0x36, 0x00
.byte 0x12, 0x00, 0x36, 0x00
.byte 0x1b, 0x00, 0x36, 0x00
.byte 0x24, 0x00, 0x36, 0x00
.byte 0x2d, 0x00, 0x36, 0x00
.byte 0x36, 0x00, 0x36, 0x00
.byte 0x3f, 0x00, 0x36, 0x00
.byte 0x00, 0x15, 0x36, 0x00
.byte 0x09, 0x15, 0x36, 0x00
.byte 0x12, 0x15, 0x36, 0x00
.byte 0x1b, 0x15, 0x36, 0x00
.byte 0x24, 0x15, 0x36, 0x00
.byte 0x2d, 0x15, 0x36, 0x00
.byte 0x36, 0x15, 0x36, 0x00
.byte 0x3f, 0x15, 0x36, 0x00
.byte 0x00, 0x2a, 0x36, 0x00
.byte 0x09, 0x2a, 0x36, 0x00
.byte 0x12, 0x2a, 0x36, 0x00
.byte 0x1b, 0x2a, 0x36, 0x00
.byte 0x24, 0x2a, 0x36, 0x00
.byte 0x2d, 0x2a, 0x36, 0x00
.byte 0x36, 0x2a, 0x36, 0x00
.byte 0x3f, 0x2a, 0x36, 0x00
.byte 0x00, 0x3f, 0x36, 0x00
.byte 0x09, 0x3f, 0x36, 0x00
.byte 0x12, 0x3f, 0x36, 0x00
.byte 0x1b, 0x3f, 0x36, 0x00
.byte 0x24, 0x3f, 0x36, 0x00
.byte 0x2d, 0x3f, 0x36, 0x00
.byte 0x36, 0x3f, 0x36, 0x00
.byte 0x3f, 0x3f, 0x36, 0x00
.byte 0x00, 0x00, 0x3f, 0x00
.byte 0x09, 0x00, 0x3f, 0x00
.byte 0x12, 0x00, 0x3f, 0x00
.byte 0x1b, 0x00, 0x3f, 0x00
.byte 0x24, 0x00, 0x3f, 0x00
.byte 0x2d, 0x00, 0x3f, 0x00
.byte 0x36, 0x00, 0x3f, 0x00
.byte 0x3f, 0x00, 0x3f, 0x00
.byte 0x00, 0x15, 0x3f, 0x00
.byte 0x09, 0x15, 0x3f, 0x00
.byte 0x12, 0x15, 0x3f, 0x00
.byte 0x1b, 0x15, 0x3f, 0x00
.byte 0x24, 0x15, 0x3f, 0x00
.byte 0x2d, 0x15, 0x3f, 0x00
.byte 0x36, 0x15, 0x3f, 0x00
.byte 0x3f, 0x15, 0x3f, 0x00
.byte 0x00, 0x2a, 0x3f, 0x00
.byte 0x09, 0x2a, 0x3f, 0x00
.byte 0x12, 0x2a, 0x3f, 0x00
.byte 0x1b, 0x2a, 0x3f, 0x00
.byte 0x24, 0x2a, 0x3f, 0x00
.byte 0x2d, 0x2a, 0x3f, 0x00
.byte 0x36, 0x2a, 0x3f, 0x00
.byte 0x3f, 0x2a, 0x3f, 0x00
.byte 0x00, 0x3f, 0x3f, 0x00
.byte 0x09, 0x3f, 0x3f, 0x00
.byte 0x12, 0x3f, 0x3f, 0x00
.byte 0x1b, 0x3f, 0x3f, 0x00
.byte 0x24, 0x3f, 0x3f, 0x00
.byte 0x2d, 0x3f, 0x3f, 0x00
.byte 0x36, 0x3f, 0x3f, 0x00
.byte 0x3f, 0x3f, 0x3f, 0x00
/branches/fs/kernel/arch/ia32/src/boot/boot.S
63,9 → 63,24
jmpl $selector(KTEXT_DES), $multiboot_meeting_point
multiboot_meeting_point:
pushl %ebx # save parameters from GRUB
pushl %eax
movl %eax, grub_eax # save parameters from GRUB
movl %ebx, grub_ebx
xorl %eax, %eax
cpuid
cmp $0x0, %eax # any function > 0?
jbe pse_unsupported
movl $0x1, %eax # Basic function code 1
cpuid
bt $3, %edx # Test if PSE is supported
jc pse_supported
 
pse_unsupported:
movl $pse_msg, %esi
jmp error_halt
pse_supported:
#ifdef CONFIG_FB
mov $vesa_init, %esi
mov $VESA_INIT_SEGMENT << 4, %edi
89,8 → 104,8
call map_kernel # map kernel and turn paging on
popl %eax
popl %ebx
movl grub_eax, %eax
movl grub_ebx, %ebx
cmpl $MULTIBOOT_LOADER_MAGIC, %eax # compare GRUB signature
je valid_boot
209,7 → 224,7
rep movsb
#endif
 
call main_bsp # never returns
 
cli
222,19 → 237,20
# For simplicity, we map the entire 4G space.
#
movl %cr4, %ecx
orl $(1<<4), %ecx
movl %ecx, %cr4 # turn PSE on
orl $(1 << 4), %ecx # turn PSE on
andl $(~(1 << 5)), %ecx # turn PAE off
movl %ecx, %cr4
movl $(page_directory+0), %esi
movl $(page_directory+2048), %edi
movl $(page_directory + 0), %esi
movl $(page_directory + 2048), %edi
xorl %ecx, %ecx
xorl %ebx, %ebx
0:
movl $((1<<7)|(1<<0)), %eax
movl $((1 << 7) | (1 << 1) | (1 << 0)), %eax
orl %ebx, %eax
movl %eax, (%esi,%ecx,4) # mapping 0x00000000+%ecx*4M => 0x00000000+%ecx*4M
movl %eax, (%edi,%ecx,4) # mapping 0x80000000+%ecx*4M => 0x00000000+%ecx*4M
addl $(4*1024*1024), %ebx
movl %eax, (%esi, %ecx, 4) # mapping 0x00000000 + %ecx * 4M => 0x00000000 + %ecx * 4M
movl %eax, (%edi, %ecx, 4) # mapping 0x80000000 + %ecx * 4M => 0x00000000 + %ecx * 4M
addl $(4 * 1024 * 1024), %ebx
 
incl %ecx
cmpl $512, %ecx
242,12 → 258,71
 
movl %esi, %cr3
# turn paging on
movl %cr0, %ebx
orl $(1<<31), %ebx
orl $(1 << 31), %ebx # turn paging on
movl %ebx, %cr0
ret
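What the map_kernel loop builds, modeled in C (illustrative only): each page-directory entry maps one 4M PSE page, now explicitly writable (bit 1, the change above), and the same 0-2G physical range appears at both 0x00000000 and 0x80000000.

#include <stdint.h>
#include <stdio.h>

#define PDE_PRESENT  (1u << 0)
#define PDE_WRITE    (1u << 1)   /* the bit this revision adds */
#define PDE_4M       (1u << 7)   /* PSE large page */

static uint32_t page_directory_sketch[1024];

int main(void)
{
    uint32_t i;

    for (i = 0; i < 512; i++) {
        uint32_t pde = (i * 0x400000u) | PDE_4M | PDE_WRITE | PDE_PRESENT;

        page_directory_sketch[i] = pde;         /* 0x00000000 + i * 4M */
        page_directory_sketch[512 + i] = pde;   /* 0x80000000 + i * 4M */
    }
    printf("pde[0] = %#x, pde[512] = %#x\n",
        page_directory_sketch[0], page_directory_sketch[512]);
    return 0;
}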
 
# Print string from %esi to EGA display (in red) and halt
error_halt:
movl $0xb8000, %edi # base of EGA text mode memory
xorl %eax, %eax
movw $0x3d4, %dx # read bits 8 - 15 of the cursor address
movb $0xe, %al
outb %al, %dx
movw $0x3d5, %dx
inb %dx, %al
shl $8, %ax
movw $0x3d4, %dx # read bits 0 - 7 of the cursor address
movb $0xf, %al
outb %al, %dx
movw $0x3d5, %dx
inb %dx, %al
cmp $1920, %ax
jbe cursor_ok
movw $1920, %ax # sanity check for the cursor on the last line
cursor_ok:
movw %ax, %bx
shl $1, %eax
addl %eax, %edi
movw $0x0c00, %ax # black background, light red foreground
cld
ploop:
lodsb
cmp $0, %al
je ploop_end
stosw
inc %bx
jmp ploop
ploop_end:
movw $0x3d4, %dx # write bits 8 - 15 of the cursor address
movb $0xe, %al
outb %al, %dx
movw $0x3d5, %dx
movb %bh, %al
outb %al, %dx
movw $0x3d4, %dx # write bits 0 - 7 of the cursor address
movb $0xf, %al
outb %al, %dx
movw $0x3d5, %dx
movb %bl, %al
outb %al, %dx
cli
hlt
 
#ifdef CONFIG_FB
vesa_init:
jmp $selector(VESA_INIT_DES), $vesa_init_real - vesa_init
278,11 → 353,12
#define VESA_INFO_SIZE 1024
 
#define VESA_MODE_ATTRIBUTES_OFFSET 0
#define VESA_MODE_LIST_PTR_OFFSET 14
#define VESA_MODE_SCANLINE_OFFSET 16
#define VESA_MODE_WIDTH_OFFSET 18
#define VESA_MODE_HEIGHT_OFFSET 20
#define VESA_MODE_BPP_OFFSET 25
#define VESA_MODE_SCANLINE_OFFSET 16
#define VESA_MODE_PHADDR_OFFSET 40
 
#define VESA_END_OF_MODES 0xffff
292,12 → 368,10
#define VESA_GET_INFO 0x4f00
#define VESA_GET_MODE_INFO 0x4f01
#define VESA_SET_MODE 0x4f02
#define VESA_SET_PALETTE 0x4f09
 
#define CONFIG_VESA_BPP_a 255
 
#if CONFIG_VESA_BPP == 24
#undef CONFIG_VESA_BPP_a
#define CONFIG_VESA_BPP_a 32
#define CONFIG_VESA_BPP_VARIANT 32
#endif
 
mov $VESA_GET_INFO, %ax
338,18 → 412,21
cmp VESA_MODE_WIDTH_OFFSET(%di), %ax
jnz 1b
mov $CONFIG_VESA_HEIGHT,%ax
mov $CONFIG_VESA_HEIGHT, %ax
cmp VESA_MODE_HEIGHT_OFFSET(%di), %ax
jnz 1b
mov $CONFIG_VESA_BPP, %al
cmp VESA_MODE_BPP_OFFSET(%di), %al
 
#ifdef CONFIG_VESA_BPP_VARIANT
jz 2f
mov $CONFIG_VESA_BPP_a, %al
mov $CONFIG_VESA_BPP_VARIANT, %al
cmp VESA_MODE_BPP_OFFSET(%di), %al
#endif
jnz 1b
 
2:
mov %cx, %bx
361,7 → 438,60
pop %di
cmp $VESA_OK, %al
jnz 0f
 
#if CONFIG_VESA_BPP == 8
# Set 3:2:3 VGA palette
mov VESA_MODE_ATTRIBUTES_OFFSET(%di), %ax
push %di
mov $vga323 - vesa_init, %di
mov $0x100, %ecx
bt $5, %ax # Test if VGA compatible registers are present
jnc vga_compat
# Try VESA routine to set palette
mov $VESA_SET_PALETTE, %ax
xor %bl, %bl
xor %dx, %dx
int $0x10
jmp vga_not_compat
vga_compat:
# Try VGA registers to set palette
movw $0x3c6, %dx # Set palette mask
movb $0xff, %al
outb %al, %dx
movw $0x3c8, %dx # First index to set
xor %al, %al
outb %al, %dx
movw $0x3c9, %dx # Data port
vga_loop:
movb %es:2(%di), %al
outb %al, %dx
movb %es:1(%di), %al
outb %al, %dx
movb %es:(%di), %al
outb %al, %dx
addw $4, %di
loop vga_loop
vga_not_compat:
pop %di
#endif
mov VESA_MODE_PHADDR_OFFSET(%di), %esi
mov VESA_MODE_WIDTH_OFFSET(%di), %ax
shl $16, %eax
402,12 → 532,12
mov $0xffffffff, %edi # EGA text mode used, because of problems with VESA
xor %ax, %ax
jz 8b # Force relative jump
 
vga323:
#include "vga323.pal"
 
.code32
vesa_init_protect:
popl %esp
 
movw $selector(KDATA_DES), %cx
movw %cx, %es
movw %cx, %fs
415,6 → 545,8
movw %cx, %ds # kernel data + stack
movw %cx, %ss
movl $START_STACK, %esp # initialize stack pointer
 
jmpl $selector(KTEXT_DES), $vesa_meeting_point
 
.align 4
426,3 → 558,12
.align 4096
page_directory:
.space 4096, 0
 
grub_eax:
.long 0
 
grub_ebx:
.long 0
 
pse_msg:
.asciz "Page Size Extension not supported. System halted."
/branches/fs/kernel/arch/ia32/src/mm/page.c
61,7 → 61,7
* PA2KA(identity) mapping for all frames until last_frame.
*/
for (cur = 0; cur < last_frame; cur += FRAME_SIZE) {
flags = PAGE_CACHEABLE;
flags = PAGE_CACHEABLE | PAGE_WRITE;
if ((PA2KA(cur) >= config.base) && (PA2KA(cur) < config.base + config.kernel_size))
flags |= PAGE_GLOBAL;
page_mapping_insert(AS_KERNEL, PA2KA(cur), cur, flags);
69,10 → 69,8
 
exc_register(14, "page_fault", (iroutine) page_fault);
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
else {
} else
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
 
paging_on();
}
86,7 → 84,7
uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE | PAGE_WRITE);
last_frame = ALIGN_UP(last_frame + size, FRAME_SIZE);
95,12 → 93,12
 
void page_fault(int n, istate_t *istate)
{
uintptr_t page;
uintptr_t page;
pf_access_t access;
page = read_cr2();
page = read_cr2();
if (istate->error_word & PFERR_CODE_RSVD)
if (istate->error_word & PFERR_CODE_RSVD)
panic("Reserved bit set in page directory.\n");
 
if (istate->error_word & PFERR_CODE_RW)
107,14 → 105,14
access = PF_ACCESS_WRITE;
else
access = PF_ACCESS_READ;
 
if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
if (as_page_fault(page, access, istate) == AS_PF_FAULT) {
fault_if_from_uspace(istate, "Page fault: %#x", page);
 
decode_istate(istate);
printf("page fault address: %#x\n", page);
panic("page fault\n");
}
decode_istate(istate);
printf("page fault address: %#x\n", page);
panic("page fault\n");
}
}
 
/** @}
/branches/fs/kernel/arch/ia32/src/interrupt.c
176,14 → 176,21
ASSERT(n >= IVT_IRQBASE);
int inum = n - IVT_IRQBASE;
bool ack = false;
ASSERT(inum < IRQ_COUNT);
ASSERT((inum != IRQ_PIC_SPUR) && (inum != IRQ_PIC1));
 
irq_t *irq = irq_dispatch_and_lock(inum);
if (irq) {
/*
* The IRQ handler was found.
*/
if (irq->preack) {
/* Send EOI before processing the interrupt */
trap_virtual_eoi();
ack = true;
}
irq->handler(irq, irq->arg);
spinlock_unlock(&irq->lock);
} else {
194,7 → 201,9
printf("cpu%d: spurious interrupt (inum=%d)\n", CPU->id, inum);
#endif
}
trap_virtual_eoi();
if (!ack)
trap_virtual_eoi();
}
 
void interrupt_init(void)
/branches/fs/kernel/arch/ia32/src/drivers/i8254.c
82,6 → 82,7
void i8254_init(void)
{
irq_initialize(&i8254_irq);
i8254_irq.preack = true;
i8254_irq.devno = device_assign_devno();
i8254_irq.inr = IRQ_CLK;
i8254_irq.claim = i8254_claim;