/kernel/trunk/arch/sparc64/src/ddi/ddi.c |
---|
47,7 → 47,7 |
* |
* @return 0 on success or an error code from errno.h. |
*/ |
/** Enable I/O space range for a task.
 *
 * Architecture stub: nothing is done here and the request always
 * succeeds. NOTE(review): presumably sparc64 has no port-based I/O
 * space that needs per-task enabling — confirm against the generic
 * ddi code.
 *
 * @param task   Task for which the I/O space is enabled (unused).
 * @param ioaddr Starting I/O address (unused).
 * @param size   Size of the enabled range (unused).
 *
 * @return Always 0 (success).
 */
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
	return 0;
}
/kernel/trunk/arch/sparc64/src/proc/scheduler.c |
---|
48,18 → 48,18 |
/** Ensure that thread's kernel stack is locked in TLB. */ |
void before_thread_runs_arch(void) |
{ |
__address base; |
uintptr_t base; |
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH); |
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) { |
if ((uintptr_t) THREAD->kstack < base || (uintptr_t) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) { |
/* |
* Kernel stack of this thread is not locked in DTLB. |
* First, make sure it is not mapped already. |
* If not, create a locked mapping for it. |
*/ |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (__address) THREAD->kstack); |
dtlb_insert_mapping((__address) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true); |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack); |
dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true); |
} |
} |
66,16 → 66,16 |
/** Unlock thread's stack from TLB, if necessary. */ |
void after_thread_ran_arch(void) |
{ |
__address base; |
uintptr_t base; |
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH); |
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) { |
if ((uintptr_t) THREAD->kstack < base || (uintptr_t) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) { |
/* |
* Kernel stack of this thread is locked in DTLB. |
* Destroy the mapping. |
*/ |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (__address) THREAD->kstack); |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack); |
} |
} |
/kernel/trunk/arch/sparc64/src/trap/interrupt.c |
---|
52,7 → 52,7 |
} |
/* Reregister irq to be IPC-ready */ |
void irq_ipc_bind_arch(__native irq) |
void irq_ipc_bind_arch(unative_t irq) |
{ |
panic("not implemented\n"); |
/* TODO */ |
/kernel/trunk/arch/sparc64/src/mm/tlb.c |
---|
131,7 → 131,7 |
* @param locked True for permanent mappings, false otherwise. |
* @param cacheable True if the mapping is cacheable, false otherwise. |
*/ |
void dtlb_insert_mapping(__address page, __address frame, int pagesize, bool locked, bool cacheable) |
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
170,7 → 170,7 |
void fast_data_access_mmu_miss(void) |
{ |
tlb_tag_access_reg_t tag; |
__address tpc; |
uintptr_t tpc; |
char *tpc_str; |
tag.value = dtlb_tag_access_read(); |
268,7 → 268,7 |
* @param page First page which to sweep out from ITLB and DTLB. |
* @param cnt Number of ITLB and DTLB entries to invalidate. |
*/ |
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt) |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
int i; |
/kernel/trunk/arch/sparc64/src/mm/page.c |
---|
44,7 → 44,7 |
page_mapping_operations = &ht_mapping_operations; |
} |
__address hw_map(__address physaddr, size_t size) |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |
unsigned int order; |
int i; |
73,7 → 73,7 |
else |
order = (fnzb32(size - 1) + 1) - FRAME_WIDTH; |
__address virtaddr = (__address) frame_alloc(order, FRAME_KA); |
uintptr_t virtaddr = (uintptr_t) frame_alloc(order, FRAME_KA); |
for (i = 0; i < sizemap[order].count; i++) |
dtlb_insert_mapping(virtaddr + i*sizemap[order].increment, |
/kernel/trunk/arch/sparc64/src/drivers/i8042.c |
---|
37,11 → 37,11 |
#include <arch/types.h> |
#include <arch/mm/page.h> |
volatile __u8 *kbd_virt_address = NULL; |
volatile uint8_t *kbd_virt_address = NULL; |
void kbd_init() |
{ |
kbd_virt_address = (__u8 *) hw_map(KBD_PHYS_ADDRESS, LAST_REG); |
kbd_virt_address = (uint8_t *) hw_map(KBD_PHYS_ADDRESS, LAST_REG); |
i8042_init(); |
} |