/kernel/trunk/arch/sparc64/include/drivers/i8042.h
---
32,7 → 32,7
 #include <arch/types.h>
 #define KBD_PHYS_ADDRESS 0x1fff8904000ULL
-#define KBD_VIRT_ADDRESS 0x000d0000000ULL
+#define KBD_VIRT_ADDRESS 0x00000d00000ULL
 #define STATUS_REG 4
 #define COMMAND_REG 4
40,7 → 40,7
 static inline void i8042_data_write(__u8 data)
 {
-	((volatile __u8 *)(KBD_VIRT_ADDRESS))[DATA_REG] = data;
+	((__u8 *)(KBD_VIRT_ADDRESS))[DATA_REG] = data;
 }
 static inline __u8 i8042_data_read(void)
55,7 → 55,7
 static inline void i8042_command_write(__u8 command)
 {
-	((volatile __u8 *)(KBD_VIRT_ADDRESS))[COMMAND_REG] = command;
+	((__u8 *)(KBD_VIRT_ADDRESS))[COMMAND_REG] = command;
 }
 #endif
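The accessors above are memory-mapped I/O: each i8042 register sits at KBD_VIRT_ADDRESS plus a register offset. For orientation, a polled read built on them could look like the sketch below. This is an illustration only: it assumes an i8042_status_read() accessor analogous to i8042_data_read() and the standard i8042 status bit 0x01 (output buffer full), neither of which appears in this changeset.

	#define KBD_OUTPUT_FULL 0x01	/* i8042 OBF status bit (assumed) */

	static inline __u8 kbd_poll_scancode(void)
	{
		/* Spin until the controller reports a byte in its output buffer. */
		while (!(i8042_status_read() & KBD_OUTPUT_FULL))
			;
		return i8042_data_read();
	}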
/kernel/trunk/arch/sparc64/include/trap/exception.h
---
31,13 → 31,11
 #define TT_INSTRUCTION_ACCESS_EXCEPTION 0x08
 #define TT_ILLEGAL_INSTRUCTION 0x10
-#define TT_DATA_ACCESS_ERROR 0x32
 #define TT_MEM_ADDRESS_NOT_ALIGNED 0x34
 #ifndef __ASM__
 extern void do_instruction_access_exc(void);
 extern void do_mem_address_not_aligned(void);
-extern void do_data_access_error(void);
 extern void do_illegal_instruction(void);
 #endif /* !__ASM__ */
/kernel/trunk/arch/sparc64/include/mm/tlb.h
---
405,6 → 405,4
 extern void fast_data_access_mmu_miss(void);
 extern void fast_data_access_protection(void);
-extern void dtlb_insert_mapping(__address page, __address frame, int pagesize, bool locked, bool cacheable);
 #endif
/kernel/trunk/arch/sparc64/Makefile.inc
---
81,7 → 81,6
 arch/$(ARCH)/src/mm/memory_init.c \
 arch/$(ARCH)/src/sparc64.c \
 arch/$(ARCH)/src/start.S \
-arch/$(ARCH)/src/proc/scheduler.c \
 arch/$(ARCH)/src/trap/trap_table.S \
 arch/$(ARCH)/src/trap/trap.c \
 arch/$(ARCH)/src/trap/exception.c \
/kernel/trunk/arch/sparc64/src/proc/scheduler.c
---
File deleted
/kernel/trunk/arch/sparc64/src/console.c
---
40,7 → 40,6
 #include <arch/register.h>
 #include <proc/thread.h>
 #include <synch/mutex.h>
-#include <arch/mm/tlb.h>
 #define KEYBOARD_POLL_PAUSE 50000 /* 50ms */
76,10 → 75,6
 {
 	ofw_console_active = 0;
 	stdin = NULL;
-	dtlb_insert_mapping(FB_VIRT_ADDRESS, FB_PHYS_ADDRESS, PAGESIZE_4M, true, false);
-	dtlb_insert_mapping(KBD_VIRT_ADDRESS, KBD_PHYS_ADDRESS, PAGESIZE_8K, true, false);
 	fb_init(FB_VIRT_ADDRESS, FB_X_RES, FB_Y_RES, FB_COLOR_DEPTH/8);
 	i8042_init();
 }
/kernel/trunk/arch/sparc64/src/sparc64.c
---
74,3 → 74,7
 void calibrate_delay_loop(void)
 {
 }
+void before_thread_runs_arch(void)
+{
+}
/kernel/trunk/arch/sparc64/src/mm/tlb.c
---
109,26 → 109,37
 	dmmu_enable();
 	immu_enable();
-}
-/** Insert privileged mapping into DMMU TLB.
- *
- * @param page Virtual page address.
- * @param frame Physical frame address.
- * @param pagesize Page size.
- * @param locked True for permanent mappings, false otherwise.
- * @param cacheable True if the mapping is cacheable, false otherwise.
- */
-void dtlb_insert_mapping(__address page, __address frame, int pagesize, bool locked, bool cacheable)
-{
-	tlb_tag_access_reg_t tag;
-	tlb_data_t data;
-	page_address_t pg;
-	frame_address_t fr;
-	pg.address = page;
-	fr.address = frame;
+	/*
+	 * Quick hack: map frame buffer
+	 */
+	fr.address = FB_PHYS_ADDRESS;
+	pg.address = FB_VIRT_ADDRESS;
 	tag.value = ASID_KERNEL;
 	tag.vpn = pg.vpn;
 	dtlb_tag_access_write(tag.value);
+	data.value = 0;
+	data.v = true;
+	data.size = PAGESIZE_4M;
+	data.pfn = fr.pfn;
+	data.l = true;
+	data.cp = 0;
+	data.cv = 0;
+	data.p = true;
+	data.w = true;
+	data.g = true;
+	dtlb_data_in_write(data.value);
+	/*
+	 * Quick hack: map keyboard
+	 */
+	fr.address = KBD_PHYS_ADDRESS;
+	pg.address = KBD_VIRT_ADDRESS;
+	tag.value = ASID_KERNEL;
+	tag.vpn = pg.vpn;
136,11 → 147,11
 	data.value = 0;
 	data.v = true;
-	data.size = pagesize;
+	data.size = PAGESIZE_8K;
 	data.pfn = fr.pfn;
-	data.l = locked;
-	data.cp = cacheable;
-	data.cv = cacheable;
+	data.l = true;
+	data.cp = 0;
+	data.cv = 0;
 	data.p = true;
 	data.w = true;
 	data.g = true;
158,6 → 169,7
 void fast_data_access_mmu_miss(void)
 {
 	tlb_tag_access_reg_t tag;
+	tlb_data_t data;
 	__address tpc;
 	char *tpc_str;
174,7 → 186,18
 	/*
 	 * Identity map piece of faulting kernel address space.
 	 */
-	dtlb_insert_mapping(tag.vpn * PAGE_SIZE, tag.vpn * FRAME_SIZE, PAGESIZE_8K, false, true);
+	data.value = 0;
+	data.v = true;
+	data.size = PAGESIZE_8K;
+	data.pfn = tag.vpn;
+	data.l = false;
+	data.cp = 1;
+	data.cv = 1;
+	data.p = true;
+	data.w = true;
+	data.g = true;
+	dtlb_data_in_write(data.value);
 }
 /** DTLB protection fault handler. */
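Both quick-hack mappings and the miss handler follow the same two-step UltraSPARC sequence: program the TLB tag access register, then write the data-in register, which installs the entry. A condensed sketch of one such insertion, with the role of each TTE bit spelled out (identifiers as in the hunks above; the bitfield types come from tlb.h and are not shown in this changeset):

	tlb_tag_access_reg_t tag;
	tlb_data_t data;

	tag.value = ASID_KERNEL;	/* kernel address space identifier */
	tag.vpn = pg.vpn;		/* virtual page number to map */
	dtlb_tag_access_write(tag.value);

	data.value = 0;
	data.v = true;			/* valid */
	data.size = PAGESIZE_8K;	/* page size of the entry */
	data.pfn = fr.pfn;		/* physical frame number */
	data.l = true;			/* locked: excluded from automatic replacement */
	data.cp = 0;			/* not cacheable in physically-indexed caches */
	data.cv = 0;			/* not cacheable in virtually-indexed caches (MMIO) */
	data.p = true;			/* privileged: kernel-only access */
	data.w = true;			/* writable */
	data.g = true;			/* global: valid in any context */
	dtlb_data_in_write(data.value);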
/kernel/trunk/arch/sparc64/src/trap/exception.c
---
42,12 → 42,6
 	panic("Memory Address Not Aligned\n");
 }
-/** Handle data_access_error. */
-void do_data_access_error(void)
-{
-	panic("Data Access Error: %P\n", tpc_read());
-}
 /** Handle illegal_instruction. */
 void do_illegal_instruction(void)
 {
/kernel/trunk/arch/sparc64/src/trap/trap_table.S
---
72,12 → 72,6
 clean_window_handler:
 	CLEAN_WINDOW_HANDLER
-/* TT = 0x32, TL = 0, data_access_error */
-.org trap_table + TT_DATA_ACCESS_ERROR*ENTRY_SIZE
-.global data_access_error
-data_access_error:
-	SIMPLE_HANDLER do_data_access_error
 /* TT = 0x34, TL = 0, mem_address_not_aligned */
 .org trap_table + TT_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
 .global mem_address_not_aligned
232,12 → 226,6
 clean_window_handler_high:
 	CLEAN_WINDOW_HANDLER
-/* TT = 0x32, TL > 0, data_access_error */
-.org trap_table + (TT_DATA_ACCESS_ERROR+512)*ENTRY_SIZE
-.global data_access_error_high
-data_access_error_high:
-	SIMPLE_HANDLER do_data_access_error
 /* TT = 0x34, TL > 0, mem_address_not_aligned */
 .org trap_table + (TT_MEM_ADDRESS_NOT_ALIGNED+512)*ENTRY_SIZE
 .global mem_address_not_aligned_high
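The .org arithmetic above encodes the SPARC V9 trap table layout: one slot per trap type, with a second bank at an offset of 512 entries for traps taken at TL > 0 (hence the *_high labels). A hypothetical helper making the address computation explicit; the ENTRY_SIZE value of 32 bytes (eight 4-byte instructions per slot) is an assumption based on the UltraSPARC trap table, not something shown in this changeset:

	/* Address of the handler slot for trap type tt, taken at trap level tl.
	 * Assumes ENTRY_SIZE == 32 bytes and a 512-entry bank per trap level group. */
	static inline __address trap_entry(__address trap_table, int tt, int tl)
	{
		return trap_table + (tt + (tl > 0 ? 512 : 0)) * 32 /* ENTRY_SIZE */;
	}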
/kernel/trunk/arch/amd64/src/proc/scheduler.c
---
43,7 → 43,3
 		(__u64)&THREAD->kstack);
 	swapgs();
 }
-void after_thread_ran_arch(void)
-{
-}
/kernel/trunk/arch/ia32/src/proc/scheduler.c
---
37,7 → 37,3
 	CPU->arch.tss->esp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
 	CPU->arch.tss->ss0 = selector(KDATA_DES);
 }
-void after_thread_ran_arch(void)
-{
-}
/kernel/trunk/arch/ia64/src/dummy.s
---
32,7 → 32,6
 .global asm_delay_loop
 .global userspace
 .global before_thread_runs_arch
-.global after_thread_ran_arch
 .global cpu_sleep
 .global dummy
 .global fpu_enable
40,7 → 39,6
 .global fpu_init
 before_thread_runs_arch:
-after_thread_ran_arch:
 userspace:
 calibrate_delay_loop:
 asm_delay_loop:
/kernel/trunk/arch/ppc32/src/dummy.s
---
31,7 → 31,6
 .global asm_delay_loop
 .global userspace
 .global before_thread_runs_arch
-.global after_thread_ran_arch
 .global dummy
 .global fpu_init
 .global fpu_enable
38,7 → 37,6
 .global fpu_disable
 before_thread_runs_arch:
-after_thread_ran_arch:
 userspace:
 asm_delay_loop:
 fpu_init:
/kernel/trunk/arch/mips32/src/mips32.c
---
134,7 → 134,3
 {
 	supervisor_sp = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
 }
-void after_thread_ran_arch(void)
-{
-}
/kernel/trunk/generic/src/proc/scheduler.c
---
49,7 → 49,7
 atomic_t nrdy;
-/** Take actions before new thread runs.
+/** Take actions before new thread runs
  *
  * Perform actions that need to be
  * taken before the newly selected
77,20 → 77,6
 #endif
 }
-/** Take actions after old thread ran.
- *
- * Perform actions that need to be
- * taken after the running thread
- * was preempted by the scheduler.
- *
- * THREAD->lock is locked on entry
- *
- */
-void after_thread_ran(void)
-{
-	after_thread_ran_arch();
-}
 #ifdef CONFIG_FPU_LAZY
 void scheduler_fpu_lazy_request(void)
 {
271,9 → 257,6
 	ASSERT(CPU != NULL);
 	if (THREAD) {
-		/* must be run after switch to scheduler stack */
-		after_thread_ran();
 		switch (THREAD->state) {
 		case Running:
 			THREAD->state = Ready;
317,7 → 300,6
 			panic("tid%d: unexpected state %s\n", THREAD->tid, thread_states[THREAD->state]);
 			break;
 		}
 		THREAD = NULL;
 	}
369,16 → 351,6
 #endif
-	/*
-	 * Some architectures provide late kernel PA2KA(identity)
-	 * mapping in a page fault handler. However, the page fault
-	 * handler uses the kernel stack of the running thread and
-	 * therefore cannot be used to map it. The kernel stack, if
-	 * necessary, is to be mapped in before_thread_runs(). This
-	 * function must be executed before the switch to the new stack.
-	 */
-	before_thread_runs();
 	/*
 	 * Copy the knowledge of CPU, TASK, THREAD and preemption counter to thread's stack.
 	 */
 	the_copy(THE, (the_t *) THREAD->kstack);
415,6 → 387,7
 	/*
 	 * This is the place where threads leave scheduler();
 	 */
+	before_thread_runs();
 	spinlock_unlock(&THREAD->lock);
 	interrupts_restore(THREAD->saved_context.ipl);
 	return;
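After these hunks, before_thread_runs() is called at the single point where a thread resumes from the scheduler, just before its saved interrupt level is restored. Only the generic-to-arch delegation survives on this path; a condensed sketch of the remaining chain (the FPU-context tail of before_thread_runs() is elided, as the hunks above only show its closing #endif):

	void before_thread_runs(void)
	{
		before_thread_runs_arch();	/* per-architecture hook; empty on several ports */
		/* ... FPU context handling follows (CONFIG_FPU_LAZY) ... */
	}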
/kernel/trunk/generic/include/proc/scheduler.h
---
53,14 → 53,12
 extern void kcpulb(void *arg);
 extern void before_thread_runs(void);
-extern void after_thread_ran(void);
 extern void sched_print_list(void);
 /*
  * To be defined by architectures:
  */
 extern void before_thread_runs_arch(void);
-extern void after_thread_ran_arch(void);
 #endif