/trunk/kernel/generic/include/adt/hash_table.h
---
26,14 → 26,14
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericadt
 * @{
 */
/** @file
 */

#ifndef __HASH_TABLE_H__
#define __HASH_TABLE_H__
#ifndef KERN_HASH_TABLE_H_
#define KERN_HASH_TABLE_H_

#include <adt/list.h>
#include <arch/types.h>
81,6 → 81,5
#endif

/** @}
 */
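
In the flattened hunk above, the old include guard (__HASH_TABLE_H__) is shown immediately followed by its replacement (KERN_HASH_TABLE_H_). The rename presumably moves the guard out of the namespace reserved in C, where identifiers beginning with two underscores belong to the implementation, and adds a kernel-wide KERN_ prefix. A minimal sketch of the new convention:

	#ifndef KERN_HASH_TABLE_H_
	#define KERN_HASH_TABLE_H_

	/* ... declarations as in the file above ... */

	#endif
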
/trunk/kernel/arch/sparc64/include/asm.h
---
321,6 → 321,18
	__asm__ volatile ("flushw\n");
}

/** Switch to nucleus by setting TL to 1. */
static inline void nucleus_enter(void)
{
	__asm__ volatile ("wrpr %g0, 1, %tl\n");
}

/** Switch from nucleus by setting TL to 0. */
static inline void nucleus_leave(void)
{
	__asm__ volatile ("wrpr %g0, %g0, %tl\n");
}

extern void cpu_halt(void);
extern void cpu_sleep(void);
extern void asm_delay_loop(uint32_t t);
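
These helpers exist for the tlb.c changes later in this changeset: while TL > 0 the MMU translates kernel accesses through the nucleus context, so the primary context register can be switched without pulling the kernel's own mappings away. A condensed C sketch of the pattern, mirroring tlb_invalidate_asid() below:

	tlb_context_reg_t pc_save, ctx;

	nucleus_enter();			/* TL = 1: MMU now translates via the nucleus context */
	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_primary_context_write(ctx.v);	/* safe: the kernel no longer relies on this context */
	/* ... itlb_demap()/dtlb_demap() against the primary context ... */
	mmu_primary_context_write(pc_save.v);
	nucleus_leave();			/* back to TL = 0 */
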
/trunk/kernel/arch/sparc64/include/trap/mmu.h
---
121,6 → 121,11
	PREEMPTIBLE_HANDLER fast_data_access_protection
.endm

.macro MEM_ADDRESS_NOT_ALIGNED_HANDLER
	ba mem_address_not_aligned_handler
	nop
.endm

/*
 * Macro used to lower TL when a MMU trap is caused by
 * the userspace register window spill or fill handler.
/trunk/kernel/arch/sparc64/Makefile.inc
---
95,6 → 95,7
	arch/$(ARCH)/src/start.S \
	arch/$(ARCH)/src/proc/scheduler.c \
	arch/$(ARCH)/src/proc/thread.c \
	arch/$(ARCH)/src/trap/mmu.S \
	arch/$(ARCH)/src/trap/trap_table.S \
	arch/$(ARCH)/src/trap/trap.c \
	arch/$(ARCH)/src/trap/exception.c \
/trunk/kernel/arch/sparc64/src/asm.S
---
26,6 → 26,7
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

#include <arch/arch.h>
#include <arch/stack.h>
#include <arch/regdef.h>
#include <arch/mm/mmu.h>
32,19 → 33,14
.text

.register %g2, #scratch
.register %g3, #scratch

/*
 * This is the assembly language version of our _memcpy() generated by gcc.
 */
.global memcpy
.global memcpy_from_uspace
.global memcpy_to_uspace
.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address
.global memsetb

memcpy:
memcpy_from_uspace:
memcpy_to_uspace:
	add %o1, 7, %g1
	and %g1, -8, %g1
	cmp %o1, %g1
99,11 → 95,132
	jmp %o7 + 8		! exit point
	mov %o1, %o0

/*
 * Almost the same as memcpy() except the loads are from userspace.
 */
.global memcpy_from_uspace
memcpy_from_uspace:
	add %o1, 7, %g1
	and %g1, -8, %g1
	cmp %o1, %g1
	be,pn %xcc, 3f
	add %o0, 7, %g1
	mov 0, %g3
0:
	brz,pn %o2, 2f
	mov 0, %g2
1:
	lduba [%g3 + %o1] ASI_AIUS, %g1
	add %g2, 1, %g2
	cmp %o2, %g2
	stb %g1, [%g3 + %o0]
	bne,pt %xcc, 1b
	mov %g2, %g3
2:
	jmp %o7 + 8		! exit point
	mov %o1, %o0
3:
	and %g1, -8, %g1
	cmp %o0, %g1
	bne,pt %xcc, 0b
	mov 0, %g3
	srlx %o2, 3, %g4
	brz,pn %g4, 5f
	mov 0, %g5
4:
	sllx %g3, 3, %g2
	add %g5, 1, %g3
	ldxa [%o1 + %g2] ASI_AIUS, %g1
	mov %g3, %g5
	cmp %g4, %g3
	bne,pt %xcc, 4b
	stx %g1, [%o0 + %g2]
5:
	and %o2, 7, %o2
	brz,pn %o2, 2b
	sllx %g4, 3, %g1
	mov 0, %g2
	add %g1, %o0, %o0
	add %g1, %o1, %g4
	mov 0, %g3
6:
	lduba [%g2 + %g4] ASI_AIUS, %g1
	stb %g1, [%g2 + %o0]
	add %g3, 1, %g2
	cmp %o2, %g2
	bne,pt %xcc, 6b
	mov %g2, %g3
	jmp %o7 + 8		! exit point
	mov %o1, %o0

/*
 * Almost the same as memcpy() except the stores are to userspace.
 */
.global memcpy_to_uspace
memcpy_to_uspace:
	add %o1, 7, %g1
	and %g1, -8, %g1
	cmp %o1, %g1
	be,pn %xcc, 3f
	add %o0, 7, %g1
	mov 0, %g3
0:
	brz,pn %o2, 2f
	mov 0, %g2
1:
	ldub [%g3 + %o1], %g1
	add %g2, 1, %g2
	cmp %o2, %g2
	stba %g1, [%g3 + %o0] ASI_AIUS
	bne,pt %xcc, 1b
	mov %g2, %g3
2:
	jmp %o7 + 8		! exit point
	mov %o1, %o0
3:
	and %g1, -8, %g1
	cmp %o0, %g1
	bne,pt %xcc, 0b
	mov 0, %g3
	srlx %o2, 3, %g4
	brz,pn %g4, 5f
	mov 0, %g5
4:
	sllx %g3, 3, %g2
	add %g5, 1, %g3
	ldx [%o1 + %g2], %g1
	mov %g3, %g5
	cmp %g4, %g3
	bne,pt %xcc, 4b
	stxa %g1, [%o0 + %g2] ASI_AIUS
5:
	and %o2, 7, %o2
	brz,pn %o2, 2b
	sllx %g4, 3, %g1
	mov 0, %g2
	add %g1, %o0, %o0
	add %g1, %o1, %g4
	mov 0, %g3
6:
	ldub [%g2 + %g4], %g1
	stba %g1, [%g2 + %o0] ASI_AIUS
	add %g3, 1, %g2
	cmp %o2, %g2
	bne,pt %xcc, 6b
	mov %g2, %g3
	jmp %o7 + 8		! exit point
	mov %o1, %o0

.global memcpy_from_uspace_failover_address
.global memcpy_to_uspace_failover_address
memcpy_from_uspace_failover_address:
memcpy_to_uspace_failover_address:
	jmp %o7 + 8		! exit point
	mov %g0, %o0		! return 0 on failure

.global memsetb
memsetb:
	b _memsetb
	nop
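
The failover labels presumably give the MMU trap code a recovery point: when a userspace access inside memcpy_from_uspace()/memcpy_to_uspace() faults, execution is redirected to the failover address, which returns 0, while the successful path returns a non-zero value. A hedged C sketch of a caller honoring that contract; the prototype and the copy_in() wrapper are assumed here for illustration, not the kernel's actual declarations:

	#include <stdbool.h>
	#include <stddef.h>

	/* Prototype assumed for illustration; returns 0 if the access faulted. */
	extern void *memcpy_from_uspace(void *dst, const void *uspace_src, size_t size);

	/* Illustrative wrapper: true on success, false on a userspace fault. */
	static bool copy_in(void *dst, const void *uspace_src, size_t size)
	{
		return memcpy_from_uspace(dst, uspace_src, size) != NULL;
	}
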
154,9 → 271,9
 */
.global switch_to_userspace
switch_to_userspace:
	save %o1, -STACK_WINDOW_SAVE_AREA_SIZE, %sp
	flushw
	wrpr %g0, 0, %cleanwin			! avoid information leak
	save %o1, -STACK_WINDOW_SAVE_AREA_SIZE, %sp
	mov %i3, %o0				! uarg
/trunk/kernel/arch/sparc64/src/trap/mmu.S
---
0,0 → 1,46
#
# Copyright (C) 2006 Jakub Jermar
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
#   notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
#   notice, this list of conditions and the following disclaimer in the
#   documentation and/or other materials provided with the distribution.
# - The name of the author may not be used to endorse or promote products
#   derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#

/**
 * @file
 * @brief MMU trap handlers that do not fit into the trap table.
 */

.register %g2, #scratch
.register %g3, #scratch

.text

#include <arch/trap/mmu.h>
#include <arch/trap/trap_table.h>
#include <arch/regdef.h>

.global mem_address_not_aligned_handler
mem_address_not_aligned_handler:
	HANDLE_MMU_TRAPS_FROM_SPILL_OR_FILL
	PREEMPTIBLE_HANDLER do_mem_address_not_aligned
/trunk/kernel/arch/sparc64/src/trap/trap_table.S
---
27,7 → 27,8
#

/**
 * This file contains kernel trap table.
 * @file
 * @brief This file contains kernel trap table.
 */

.register %g2, #scratch
84,7 → 85,7
.org trap_table + TT_MEM_ADDRESS_NOT_ALIGNED*ENTRY_SIZE
.global mem_address_not_aligned
mem_address_not_aligned:
	PREEMPTIBLE_HANDLER do_mem_address_not_aligned
	MEM_ADDRESS_NOT_ALIGNED_HANDLER

/* TT = 0x41, TL = 0, interrupt_level_1 handler */
.org trap_table + TT_INTERRUPT_LEVEL_1*ENTRY_SIZE
460,7 → 461,7
.org trap_table + (TT_MEM_ADDRESS_NOT_ALIGNED+512)*ENTRY_SIZE
.global mem_address_not_aligned_high
mem_address_not_aligned_high:
	PREEMPTIBLE_HANDLER do_mem_address_not_aligned
	MEM_ADDRESS_NOT_ALIGNED_HANDLER

/* TT = 0x64, TL > 0, fast_instruction_access_MMU_miss */
.org trap_table + (TT_FAST_INSTRUCTION_ACCESS_MMU_MISS+512)*ENTRY_SIZE
524,6 → 525,17
 * %g7 Pre-set as address of the userspace window buffer.
 */
.macro PREEMPTIBLE_HANDLER_TEMPLATE is_syscall
	/*
	 * ASSERT(%tl == 1)
	 */
	rdpr %tl, %g3
	cmp %g3, 1
	be 1f
	nop
0:	ba 0b				! this is for debugging; if we ever get here,
	nop				! it will be easy to find
1:
.if NOT(\is_syscall)
	rdpr %tstate, %g3
	andcc %g3, TSTATE_PRIV_BIT, %g0	! if this trap came from the privileged mode...
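
The template now begins with an inline assertion that it is entered at TL = 1; on violation it parks in a one-instruction loop (ba 0b / nop), which, as the comment notes, is trivial to locate in a debugger. In C terms the added instructions amount to something like the following, with read_tl() standing in as a hypothetical accessor for the %tl register:

	if (read_tl() != 1) {
		while (1)
			;	/* hang here so the violation is easy to find */
	}
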
535,6 → 547,8
 */
	wrpr %g0, WSTATE_OTHER(0) | WSTATE_NORMAL(2), %wstate
	wrpr %g0, NWINDOW - 1, %cleanwin	! prevent unnecessary clean_window exceptions

	/*
	 * Switch to kernel stack. The old stack is
	 * automatically saved in the old window's %sp
554,12 → 568,10
	/*
	 * Mark the CANRESTORE windows as OTHER windows.
	 * Set CLEANWIN to NWINDOW-1 so that clean_window traps do not occur.
	 */
	rdpr %canrestore, %l0
	wrpr %l0, %otherwin
	wrpr %g0, %canrestore
	wrpr %g0, NWINDOW - 1, %cleanwin

	/*
	 * Switch to primary context 0.
726,7 → 738,8
	wr %g0, ASI_DMMU, %asi
	ldxa [VA_SECONDARY_CONTEXT_REG] %asi, %g1
	stxa %g1, [VA_PRIMARY_CONTEXT_REG] %asi
	flush %o7
	rd %pc, %g1
	flush %g1

	rdpr %cwp, %g1
	rdpr %otherwin, %g2
/trunk/kernel/arch/sparc64/src/mm/tlb.c
---
52,9 → 52,9
static void dtlb_pte_copy(pte_t *t, bool ro);
static void itlb_pte_copy(pte_t *t);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str);
static void do_fast_instruction_access_mmu_miss_fault(istate_t *istate, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate, const char *str);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);
static void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str);

char *context_encoding[] = {
	"Primary",
213,13 → 213,14
	pte_t *t;

	tag.value = dtlb_tag_access_read();
	va = tag.vpn * PAGE_SIZE;
	va = tag.vpn << PAGE_WIDTH;

	if (tag.context == ASID_KERNEL) {
		if (!tag.vpn) {
			/* NULL access in kernel */
			do_fast_data_access_mmu_miss_fault(istate, __FUNCTION__);
			do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
		}
		do_fast_data_access_mmu_miss_fault(istate, "Unexpected kernel page fault.");
		do_fast_data_access_mmu_miss_fault(istate, tag, "Unexpected kernel page fault.");
	}

	page_table_lock(AS, true);
238,7 → 239,7
 */
	page_table_unlock(AS, true);
	if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
		do_fast_data_access_mmu_miss_fault(istate, __FUNCTION__);
		do_fast_data_access_mmu_miss_fault(istate, tag, __FUNCTION__);
		}
	}
}
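
The switch from tag.vpn * PAGE_SIZE to tag.vpn << PAGE_WIDTH is value-preserving whenever PAGE_SIZE equals 1 << PAGE_WIDTH; the shift merely states the VPN-to-address conversion directly. A worked example, assuming 8K pages (PAGE_WIDTH = 13, an assumption for illustration only):

	uint64_t  vpn      = 5;
	uintptr_t va_mul   = vpn * 8192;	/* 5 * 8192 = 40960 */
	uintptr_t va_shift = vpn << 13;		/* 5 << 13  = 40960, identical */
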
251,7 → 252,7
	pte_t *t;

	tag.value = dtlb_tag_access_read();
	va = tag.vpn * PAGE_SIZE;
	va = tag.vpn << PAGE_WIDTH;

	page_table_lock(AS, true);
	t = page_mapping_find(AS, va);
271,7 → 272,7
 */
	page_table_unlock(AS, true);
	if (as_page_fault(va, PF_ACCESS_WRITE, istate) == AS_PF_FAULT) {
		do_fast_data_access_protection_fault(istate, __FUNCTION__);
		do_fast_data_access_protection_fault(istate, tag, __FUNCTION__);
		}
	}
}
311,14 → 312,12
	panic("%s\n", str);
}

void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str)
void do_fast_data_access_mmu_miss_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
{
	tlb_tag_access_reg_t tag;
	uintptr_t va;
	char *tpc_str = get_symtab_entry(istate->tpc);

	tag.value = dtlb_tag_access_read();
	va = tag.vpn * PAGE_SIZE;
	va = tag.vpn << PAGE_WIDTH;

	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
	printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
325,14 → 324,12
	panic("%s\n", str);
}

void do_fast_data_access_protection_fault(istate_t *istate, const char *str)
void do_fast_data_access_protection_fault(istate_t *istate, tlb_tag_access_reg_t tag, const char *str)
{
	tlb_tag_access_reg_t tag;
	uintptr_t va;
	char *tpc_str = get_symtab_entry(istate->tpc);

	tag.value = dtlb_tag_access_read();
	va = tag.vpn * PAGE_SIZE;
	va = tag.vpn << PAGE_WIDTH;

	printf("Faulting page: %p, ASID=%d\n", va, tag.context);
	printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
374,16 → 371,21
 */
void tlb_invalidate_asid(asid_t asid)
{
	tlb_context_reg_t sc_save, ctx;
	tlb_context_reg_t pc_save, ctx;

	ctx.v = sc_save.v = mmu_secondary_context_read();
	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();
	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_secondary_context_write(ctx.v);
	mmu_primary_context_write(ctx.v);

	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_SECONDARY, 0);
	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_SECONDARY, 0);
	itlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);
	dtlb_demap(TLB_DEMAP_CONTEXT, TLB_DEMAP_PRIMARY, 0);

	mmu_secondary_context_write(sc_save.v);
	mmu_primary_context_write(pc_save.v);
	nucleus_leave();
}

/** Invalidate all ITLB and DTLB entries for specified page range in specified address space.
395,18 → 397,23
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt)
{
	int i;
	tlb_context_reg_t sc_save, ctx;
	tlb_context_reg_t pc_save, ctx;

	ctx.v = sc_save.v = mmu_secondary_context_read();
	/* switch to nucleus because we are mapped by the primary context */
	nucleus_enter();
	ctx.v = pc_save.v = mmu_primary_context_read();
	ctx.context = asid;
	mmu_secondary_context_write(ctx.v);
	mmu_primary_context_write(ctx.v);

	for (i = 0; i < cnt; i++) {
		itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, page + i * PAGE_SIZE);
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_SECONDARY, page + i * PAGE_SIZE);
		itlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
		dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_PRIMARY, page + i * PAGE_SIZE);
	}

	mmu_secondary_context_write(sc_save.v);
	mmu_primary_context_write(pc_save.v);
	nucleus_leave();
}

/** @}
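
Both invalidation routines now demap through the primary context instead of the secondary one: the kernel itself runs mapped by the primary context, so it first raises TL (nucleus_enter()) before swapping that context out from under itself. A hypothetical caller sketch showing where tlb_invalidate_pages() fits; the wrapper function and its name are illustrative, while as_t and its asid field are assumed from the generic kernel:

	/* Illustrative only: flush stale ITLB/DTLB entries after unmapping
	 * cnt pages starting at 'page' in address space 'as'. */
	static void example_after_unmap(as_t *as, uintptr_t page, count_t cnt)
	{
		tlb_invalidate_pages(as->asid, page, cnt);
	}
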