/branches/rcu/kernel/arch/sparc64/src/smp/smp.c
---
100,7 → 100,7
if (waitq_sleep_timeout(&ap_completion_wq, 1000000, SYNCH_FLAGS_NONE) == ESYNCH_TIMEOUT)
printf("%s: waiting for processor (mid = %d) timed out\n",
__FUNCTION__, mid);
}
}
/branches/rcu/kernel/arch/sparc64/src/smp/ipi.c
---
74,8 → 74,8
panic("Interrupt Dispatch Status busy bit set\n");
do {
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_0, (uintptr_t)
func);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_0,
(uintptr_t) func);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_1, 0);
asi_u64_write(ASI_UDB_INTR_W, ASI_UDB_INTR_W_DATA_2, 0);
asi_u64_write(ASI_UDB_INTR_W,
/branches/rcu/kernel/arch/sparc64/src/asm.S
---
273,7 → 273,7
flushw
wrpr %g0, 0, %cleanwin ! avoid information leak
mov %i3, %o0 ! uarg
mov %i2, %o0 ! uarg
clr %i2
clr %i3
/branches/rcu/kernel/arch/sparc64/src/proc/scheduler.c
---
62,9 → 62,8
* - preemptible trap handler switches to alternate globals
* before it explicitly uses %g7.
*/
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE
- (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE,
STACK_ALIGNMENT));
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE -
(STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
write_to_ig_g6(sp);
write_to_ag_g6(sp);
write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
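The sp expression above packs three sparc64 conventions into one line: the stack grows down from the top of the kernel stack, the V9 ABI biases %sp by 2047, and room for one aligned stack item is reserved. A standalone worked example, assuming the V9 stack bias and illustrative values for the remaining constants (the real ones live in the sparc64 headers):

#include <stdint.h>
#include <stdio.h>

#define STACK_SIZE       8192    /* illustrative kernel stack size */
#define STACK_BIAS       2047    /* sparc64 V9 ABI stack bias */
#define STACK_ITEM_SIZE  8       /* assumed: one 64-bit stack item */
#define STACK_ALIGNMENT  16      /* assumed: V9 stack alignment */
#define ALIGN_UP(a, b)   (((a) + ((b) - 1)) & ~((uint64_t) (b) - 1))

int main(void)
{
	uint64_t kstack = 0x100000;  /* hypothetical stack base */
	uint64_t sp = kstack + STACK_SIZE -
	    (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
	/* 0x100000 + 0x2000 - (2047 + 16) = 0x1017f1 */
	printf("initial sp = %#llx\n", (unsigned long long) sp);
	return 0;
}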
/branches/rcu/kernel/arch/sparc64/src/proc/thread.c
---
55,7 → 55,7
* belonging to a killed thread.
*/
frame_free(KA2PA(ALIGN_DOWN((uintptr_t)
t->arch.uspace_window_buffer, PAGE_SIZE)));
}
}
75,8 → 75,8
* Mind the possible alignment of the userspace window buffer
* belonging to a killed thread.
*/
t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf,
PAGE_SIZE);
}
}
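Both thread.c hunks turn on the same invariant: the per-thread userspace window buffer may legitimately point anywhere inside its page, while frame_free() must receive the base of the originally allocated frame. A minimal sketch of that invariant, with a hypothetical helper name:

#include <stdint.h>

#define PAGE_SIZE         8192   /* illustrative */
#define ALIGN_DOWN(a, b)  ((a) & ~((uint64_t) (b) - 1))

/* Hypothetical helper: recover the page base to hand to frame_free(),
 * regardless of where inside the page the buffer pointer sits. */
static uint64_t uw_buffer_page_base(uint8_t *uspace_window_buffer)
{
	return ALIGN_DOWN((uint64_t) (uintptr_t) uspace_window_buffer,
	    PAGE_SIZE);
}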
/branches/rcu/kernel/arch/sparc64/src/sparc64.c
---
155,5 → 155,11
/* not reached */
}
void arch_reboot(void)
{
// TODO
while (1);
}
/** @}
*/
/branches/rcu/kernel/arch/sparc64/src/cpu/cpu.c
---
63,10 → 63,10
mid = *((uint32_t *) prop->value);
if (mid == CPU->arch.mid) {
prop = ofw_tree_getprop(node,
"clock-frequency");
"clock-frequency"); |
if (prop && prop->value)
clock_frequency = *((uint32_t *)
prop->value);
}
}
node = ofw_tree_find_peer_by_device_type(node, "cpu");
/branches/rcu/kernel/arch/sparc64/src/mm/tlb.c
---
198,7 → 198,7
}
/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
void fast_instruction_access_mmu_miss(unative_t unused, istate_t *istate)
{
uintptr_t va = ALIGN_DOWN(istate->tpc, PAGE_SIZE);
index_t index = (istate->tpc >> MMU_PAGE_WIDTH) % MMU_PAGES_PER_PAGE;
234,15 → 234,18
*
* Note that some faults (e.g. kernel faults) were already resolved by the
* low-level, assembly language part of the fast_data_access_mmu_miss handler.
*
* @param tag Content of the TLB Tag Access register as it existed when the
* trap happened. This prevents the confusion a clobbered Tag Access
* register would otherwise cause during a nested DTLB miss.
* @param istate Interrupted state saved on the stack.
*/
void fast_data_access_mmu_miss(int n, istate_t *istate)
void fast_data_access_mmu_miss(tlb_tag_access_reg_t tag, istate_t *istate)
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
tag.value = dtlb_tag_access_read();
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE;
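The point of the new signature is that the assembly preamble samples the Tag Access register once, before any nested DTLB miss can clobber it, and hands the C handler that snapshot by value. A sketch of the register layout this relies on, following the UltraSPARC MMU format (VA<63:13>, context<12:0>) and the tag.vpn/tag.context uses in the hunk; the field order assumes sparc64's big-endian bitfield allocation:

#include <stdint.h>

typedef union {
	uint64_t value;
	struct {
		uint64_t vpn : 51;      /* virtual page number, VA<63:13> */
		uint64_t context : 13;  /* context number (ASID), bits 12:0 */
	} __attribute__((packed));
} tlb_tag_access_reg_t;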
282,15 → 285,19
}
}
/** DTLB protection fault handler. */
void fast_data_access_protection(int n, istate_t *istate)
/** DTLB protection fault handler.
*
* @param tag Content of the TLB Tag Access register as it existed when the
* trap happened. This prevents the confusion a clobbered Tag Access
* register would otherwise cause during a nested DTLB miss.
* @param istate Interrupted state saved on the stack.
*/
void fast_data_access_protection(tlb_tag_access_reg_t tag, istate_t *istate)
{
tlb_tag_access_reg_t tag;
uintptr_t va;
index_t index;
pte_t *t;
tag.value = dtlb_tag_access_read();
va = ALIGN_DOWN((uint64_t) tag.vpn << MMU_PAGE_WIDTH, PAGE_SIZE);
index = tag.vpn % MMU_PAGES_PER_PAGE; /* 16K-page emulation */
371,9 → 378,10
uintptr_t va;
va = tag.vpn << MMU_PAGE_WIDTH;
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
if (tag.context) {
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
}
dump_istate(istate);
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
panic("%s\n", str);
386,8 → 394,10
va = tag.vpn << MMU_PAGE_WIDTH;
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
if (tag.context) {
fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
tag.context);
}
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
dump_istate(istate);
panic("%s\n", str);
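Both hunks above add the same guard around fault_if_from_uspace(). A sketch of its rationale, assuming (as on UltraSPARC) that context 0 is the nucleus context reserved for the kernel, so a zero tag.context cannot name a userspace address space; the code is the hunk's, with explanatory comments added:

if (tag.context) {
	/* non-zero context: the fault may have come from a userspace
	 * address space, so report it against the interrupted task */
	fault_if_from_uspace(istate, "%s, Page=%p (ASID=%d)\n", str, va,
	    tag.context);
}
/* context 0 (kernel context): fall through to dump_istate()/panic() */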
/branches/rcu/kernel/arch/sparc64/src/mm/as.c
---
66,6 → 66,7
*/
int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
sizeof(tsb_entry_t)) >> FRAME_WIDTH);
uintptr_t tsb = (uintptr_t) frame_alloc(order, flags | FRAME_KA);
if (!tsb)
74,6 → 75,7
as->arch.itsb = (tsb_entry_t *) tsb;
as->arch.dtsb = (tsb_entry_t *) (tsb + ITSB_ENTRY_COUNT *
sizeof(tsb_entry_t));
memsetb((uintptr_t) as->arch.itsb,
(ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) * sizeof(tsb_entry_t), 0);
#endif
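The order computation above is a log2 of the frame count needed to hold both TSBs back to back. A standalone arithmetic check with illustrative entry counts, an assumed 16-byte tsb_entry_t, and assumed fnzb32 semantics (the real values and definitions live in the sparc64 headers and may differ):

#include <stdint.h>
#include <stdio.h>

#define ITSB_ENTRY_COUNT  2048   /* illustrative */
#define DTSB_ENTRY_COUNT  2048   /* illustrative */
#define FRAME_WIDTH       13     /* 8K frames */

typedef struct { uint64_t tag; uint64_t data; } tsb_entry_t;  /* 16 B, assumed */

/* fnzb32, assumed semantics: position of the most significant set bit,
 * i.e. floor(log2). */
static int fnzb32(uint32_t v)
{
	int b = -1;
	while (v) {
		v >>= 1;
		b++;
	}
	return b;
}

int main(void)
{
	/* (2048 + 2048) * 16 B = 64 KiB; 64 KiB >> 13 = 8 frames; order 3 */
	int order = fnzb32(((ITSB_ENTRY_COUNT + DTSB_ENTRY_COUNT) *
	    sizeof(tsb_entry_t)) >> FRAME_WIDTH);
	printf("allocation order = %d (%d frames)\n", order, 1 << order);
	return 0;
}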
/branches/rcu/kernel/arch/sparc64/src/mm/cache.S
---
45,8 → 45,10
subcc %g1, DCACHE_LINE_SIZE, %g1
bnz,pt %xcc, 0b
stxa %g0, [%g1] ASI_DCACHE_TAG
membar #Sync
retl
membar #Sync
! beware SF Erratum #51, do not put the MEMBAR here
nop
/** Flush only D-cache lines of one virtual color.
*
/branches/rcu/kernel/arch/sparc64/src/mm/tsb.c
---
61,6 → 61,8
ASSERT(as->arch.itsb && as->arch.dtsb);
i0 = (page >> MMU_PAGE_WIDTH) & TSB_INDEX_MASK;
ASSERT(i0 < ITSB_ENTRY_COUNT && i0 < DTSB_ENTRY_COUNT);
if (pages == (count_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
cnt = ITSB_ENTRY_COUNT;
else
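The clamp above reads more easily with the 16K-page emulation in mind: each architectural 16K page is backed by two 8K TSB entries, so invalidating pages pages touches pages * 2 entries, and once that reaches the TSB size a full wipe is simpler and always correct. A runnable sketch of the same logic, with an illustrative entry count and an assumed count_t width:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t count_t;        /* assumed width */
#define ITSB_ENTRY_COUNT  2048   /* illustrative */

/* Sketch: how many ITSB entries to invalidate for `pages` 16K pages. */
static count_t itsb_invalidate_count(count_t pages)
{
	/* "all pages" sentinel, or more entries than the TSB holds:
	 * degenerate to invalidating the entire TSB */
	if (pages == (count_t) -1 || (pages * 2) > ITSB_ENTRY_COUNT)
		return ITSB_ENTRY_COUNT;
	/* two 8K TSB entries per emulated 16K page */
	return pages * 2;
}

int main(void)
{
	printf("%llu\n", (unsigned long long) itsb_invalidate_count(3));  /* 6 */
	printf("%llu\n", (unsigned long long) itsb_invalidate_count(-1)); /* 2048 */
	return 0;
}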
84,9 → 86,12
as_t *as;
tsb_entry_t *tsb;
index_t entry;
ASSERT(index <= 1);
as = t->as;
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
ASSERT(entry < ITSB_ENTRY_COUNT);
tsb = &as->arch.itsb[entry];
/*
102,8 → 107,8
write_barrier();
tsb->tag.context = as->asid;
tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
VA_TAG_PAGE_SHIFT;
/* the shift is bigger than PAGE_WIDTH, do not bother with index */
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
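The write_barrier() visible in both copy routines is part of a publication protocol: the MMU walks the TSB concurrently with the update, so an entry must never be observable half-written. An annotated sketch of the presumed full sequence; the valid-bit toggles (data.v) do not appear in the hunk and are an assumption about the surrounding code:

tsb->data.v = false;            /* assumed: take the entry offline first */
write_barrier();                /* invalidation visible before the refill */
tsb->tag.context = as->asid;    /* now rewrite tag and data at leisure */
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;            /* clear every data bit, valid included */
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
tsb->data.v = true;             /* assumed: publish the entry last */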
128,8 → 133,11
tsb_entry_t *tsb;
index_t entry;
ASSERT(index <= 1);
as = t->as;
entry = ((t->page >> MMU_PAGE_WIDTH) + index) & TSB_INDEX_MASK;
ASSERT(entry < DTSB_ENTRY_COUNT);
tsb = &as->arch.dtsb[entry];
/*
145,8 → 153,8
write_barrier();
tsb->tag.context = as->asid;
tsb->tag.va_tag = (t->page + (index << MMU_PAGE_WIDTH)) >>
VA_TAG_PAGE_SHIFT;
/* the shift is bigger than PAGE_WIDTH, do not bother with index */
tsb->tag.va_tag = t->page >> VA_TAG_PAGE_SHIFT;
tsb->data.value = 0;
tsb->data.size = PAGESIZE_8K;
tsb->data.pfn = (t->frame >> MMU_FRAME_WIDTH) + index;
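The replaced comment's claim ("the shift is bigger than PAGE_WIDTH, do not bother with index") is easy to verify: with 8K MMU pages (width 13) and a VA tag starting at bit 22, index << 13 for index <= 1 cannot reach, or carry into, bit 22 of a 16K-aligned page address, so dropping the index term leaves the tag unchanged. A standalone check, assuming those usual sparc64 constants and a hypothetical page address:

#include <assert.h>
#include <stdint.h>

#define MMU_PAGE_WIDTH     13   /* 8K MMU pages */
#define VA_TAG_PAGE_SHIFT  22   /* assumed: VA tag covers VA<63:22> */

int main(void)
{
	uint64_t page = 0x7f3a4000ULL;  /* hypothetical, 16K-aligned */
	uint64_t index;

	for (index = 0; index <= 1; index++) {
		/* index << 13 merely sets bit 13 of a 16K-aligned address;
		 * no carry occurs, so all bits above, including the VA tag,
		 * are unchanged */
		assert(((page + (index << MMU_PAGE_WIDTH)) >>
		    VA_TAG_PAGE_SHIFT) == (page >> VA_TAG_PAGE_SHIFT));
	}
	return 0;
}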
/branches/rcu/kernel/arch/sparc64/src/mm/frame.c
---
65,10 → 65,10
if (confdata == ADDR2PFN(KA2PA(PFN2ADDR(0))))
confdata = ADDR2PFN(KA2PA(PFN2ADDR(2)));
zone_create(ADDR2PFN(start),
SIZE2FRAMES(ALIGN_DOWN(size, FRAME_SIZE)),
confdata, 0);
last_frame = max(last_frame, start + ALIGN_UP(size,
FRAME_SIZE));
}
/*