Subversion Repositories: HelenOS

Compare Revisions: Rev 1850 → Rev 1851

/trunk/kernel/genarch/include/mm/as_ht.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
/** @file
41,6 → 41,5
 
#endif
 
/** @}
*/
 
/trunk/kernel/genarch/include/mm/page_pt.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
/** @file
116,6 → 116,5
 
#endif
 
/** @}
*/
 
/trunk/kernel/genarch/include/mm/as_pt.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
/** @file
41,6 → 41,5
 
#endif
 
/** @}
*/
 
/trunk/kernel/genarch/include/mm/page_ht.h
26,16 → 26,14
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
/** @file
/**
* @file
* @brief This is the generic page hash table interface.
*/
 
/*
* This is the generic page hash table interface.
*/
 
#ifdef CONFIG_PAGE_HT
 
#ifndef __PAGE_HT_H__
87,6 → 85,5
 
#endif
 
/** @}
*/
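
For orientation: this generic page hash table interface is what the architecture-specific code consumes elsewhere in this revision (see the new sparc64 fast_data_access_mmu_miss() below). A minimal sketch of the lookup pattern, using only calls that are visible later in this diff:

pte_t *t;

page_table_lock(as, true);
t = page_mapping_find(as, va);	/* search the software page hash table */
if (t) {
	/* Use the mapping while the table is locked, e.g. copy it into the TLB. */
}
page_table_unlock(as, true);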
 
/trunk/kernel/genarch/src/mm/as_ht.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
118,6 → 118,5
mutex_unlock(&as->lock);
}
 
/** @}
*/
 
/trunk/kernel/genarch/src/mm/page_pt.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
 
265,6 → 265,5
return &ptl3[PTL3_INDEX(page)];
}
 
/** @}
*/
 
/trunk/kernel/genarch/src/mm/asid.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
 
174,6 → 174,5
interrupts_restore(ipl);
}
 
/** @}
*/
 
/trunk/kernel/genarch/src/mm/asid_fifo.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
94,6 → 94,5
fifo_push(free_asids, asid);
}
 
/** @}
*/
 
/trunk/kernel/genarch/src/mm/as_pt.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
 
139,6 → 139,6
mutex_unlock(&as->lock);
}
 
/** @}
*/
 
/trunk/kernel/genarch/src/mm/page_ht.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genarchmm
* @{
*/
 
244,6 → 244,5
return t;
}
 
/** @}
*/
 
/trunk/kernel/generic/include/mm/as.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericmm
* @{
*/
/** @file
206,6 → 206,5
 
#endif
 
/** @}
*/
 
/trunk/kernel/generic/src/mm/as.c
543,7 → 543,7
if (!src_area->backend || !src_area->backend->share) {
/*
* There is now backend or the backend does not
* There is no backend or the backend does not
* know how to share the area.
*/
mutex_unlock(&src_area->lock);
/trunk/kernel/arch/sparc64/include/context_offset.h
20,6 → 20,3
#define OFFSET_L7 0x88
#define OFFSET_CLEANWIN 0x98
 
/** @}
*/
 
/trunk/kernel/arch/sparc64/include/interrupt.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64interrupt sparc64
* @ingroup interrupt
* @{
*/
52,6 → 52,10
#define trap_virtual_eoi()
 
struct istate {
uint64_t pstate;
uint64_t tnpc;
uint64_t tpc;
uint64_t tstate;
};
 
static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr)
74,6 → 78,5
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/sparc64/include/regdef.h
38,6 → 38,10
#define PSTATE_IE_BIT 2
#define PSTATE_AM_BIT 8
 
#define PSTATE_AG_BIT (1<<0)
#define PSTATE_IG_BIT (1<<11)
#define PSTATE_MG_BIT (1<<10)
 
#endif
 
/** @}
/trunk/kernel/arch/sparc64/include/trap/interrupt.h
81,7 → 81,6
.macro INTERRUPT_LEVEL_N_HANDLER n
save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
mov \n - 1, %o0
mov %fp, %o1
PREEMPTIBLE_HANDLER exc_dispatch
.endm
 
/trunk/kernel/arch/sparc64/include/trap/trap_table.h
77,6 → 77,10
mov %l7, %g7
.endm
 
/*
* The following needs to be in sync with the
* definition of the istate structure.
*/
#define PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE (STACK_WINDOW_SAVE_AREA_SIZE+(4*8))
#define SAVED_TSTATE -(1*8)
#define SAVED_TPC -(2*8)
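The comment above demands that these offsets stay in sync with the istate structure introduced in interrupt.h by this same revision. The correspondence, sketched for illustration (SAVED_TNPC and SAVED_PSTATE are referenced later in this revision; their values of -(3*8) and -(4*8) are assumed here, as is the availability of ASSERT and offsetof in kernel code):

/*
 * Illustration only. C handlers receive the istate address computed as
 * %fp + STACK_BIAS + SAVED_PSTATE, so pstate is the lowest-addressed member:
 *
 *   SAVED_PSTATE -(4*8) <-> istate.pstate (offset  0)   [assumed]
 *   SAVED_TNPC   -(3*8) <-> istate.tnpc   (offset  8)   [assumed]
 *   SAVED_TPC    -(2*8) <-> istate.tpc    (offset 16)
 *   SAVED_TSTATE -(1*8) <-> istate.tstate (offset 24)
 */
ASSERT(offsetof(istate_t, tpc) == (size_t) (SAVED_TPC - SAVED_PSTATE));
ASSERT(offsetof(istate_t, tstate) == (size_t) (SAVED_TSTATE - SAVED_PSTATE));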
/trunk/kernel/arch/sparc64/include/trap/mmu.h
38,6 → 38,9
#define __sparc64_MMU_TRAP_H__
 
#include <arch/stack.h>
#include <arch/mm/tlb.h>
#include <arch/mm/mmu.h>
#include <arch/mm/tte.h>
 
#define TT_FAST_INSTRUCTION_ACCESS_MMU_MISS 0x64
#define TT_FAST_DATA_ACCESS_MMU_MISS 0x68
55,11 → 58,37
.endm
 
.macro FAST_DATA_ACCESS_MMU_MISS_HANDLER
save %sp, -STACK_WINDOW_SAVE_AREA_SIZE, %sp
call fast_data_access_mmu_miss
nop
restore
/*
* First, test whether it is the kernel address space that is faulting.
* If so, immediately create an identity mapping for the faulting page
* in the DTLB. VPN 0 is excluded from this treatment.
*
* Note that branch-delay slots are used in order to save space.
*/
mov VA_DMMU_TAG_ACCESS, %g1
ldxa [%g1] ASI_DMMU, %g1 ! read the faulting Context and VPN
set TLB_TAG_ACCESS_CONTEXT_MASK, %g2
andcc %g1, %g2, %g3 ! get Context
bnz 0f ! Context is non-zero
andncc %g1, %g2, %g3 ! get page address into %g3
bz 0f ! page address is zero
 
/*
* Create and insert the identity-mapped entry for
* the faulting kernel page.
*/
or %g3, (TTE_CP|TTE_P|TTE_W), %g2 ! 8K pages are the default (encoded as 0)
set 1, %g3
sllx %g3, TTE_V_SHIFT, %g3
or %g2, %g3, %g2
stxa %g2, [%g0] ASI_DTLB_DATA_IN_REG ! identity map the kernel page
retry
 
0:
save %sp, -PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE, %sp
PREEMPTIBLE_HANDLER fast_data_access_mmu_miss
.endm
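
In C terms, the fast path this macro implements before falling back to the preemptible handler is roughly the following (a sketch only; it reuses the dtlb_tag_access_read()/dtlb_data_in_write() accessors and the TLB_TAG_ACCESS_CONTEXT_MASK and TTE_* constants that appear elsewhere in this revision):

uint64_t tag = dtlb_tag_access_read();	/* faulting Context and VPN */
uint64_t ctx = tag & TLB_TAG_ACCESS_CONTEXT_MASK;
uint64_t page = tag & ~(uint64_t) TLB_TAG_ACCESS_CONTEXT_MASK;	/* page-aligned VA */

if ((ctx == 0) && (page != 0)) {
	/* Kernel fault on a non-NULL page: identity map it and retry. */
	uint64_t tte = page | ((uint64_t) 1 << TTE_V_SHIFT) | TTE_CP | TTE_P | TTE_W;
	dtlb_data_in_write(tte);	/* 8K page size is the default (encoded as 0) */
} else {
	/* Fall through to the preemptible C handler fast_data_access_mmu_miss(). */
}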
 
.macro FAST_DATA_ACCESS_PROTECTION_HANDLER
/trunk/kernel/arch/sparc64/include/mm/tlb.h
66,6 → 66,7
 
/* TLB Tag Access shifts */
#define TLB_TAG_ACCESS_CONTEXT_SHIFT 0
#define TLB_TAG_ACCESS_CONTEXT_MASK ((1<<13)-1)
#define TLB_TAG_ACCESS_VPN_SHIFT 13
 
#ifndef __ASM__
107,7 → 108,7
uint64_t value;
struct {
uint64_t vpn : 51; /**< Virtual Address bits 63:13. */
unsigned context : 13; /**< Context identifier. */
} __attribute__ ((packed));
};
typedef union tlb_tag_read_reg tlb_tag_read_reg_t;
118,7 → 119,7
union tlb_demap_addr {
uint64_t value;
struct {
uint64_t vpn: 51; /**< Virtual Address bits 63:13. */
unsigned : 6; /**< Ignored. */
unsigned type : 1; /**< The type of demap operation. */
unsigned context : 2; /**< Context register selection. */
131,11 → 132,9
union tlb_sfsr_reg {
uint64_t value;
struct {
unsigned long : 39; /**< Implementation dependent. */
unsigned nf : 1; /**< Nonfaulting load. */
unsigned long : 40; /**< Implementation dependent. */
unsigned asi : 8; /**< ASI. */
unsigned tm : 1; /**< TLB miss. */
unsigned : 1;
unsigned : 2;
unsigned ft : 7; /**< Fault type. */
unsigned e : 1; /**< Side-effect bit. */
unsigned ct : 2; /**< Context Register selection. */
425,9 → 424,9
membar();
}
 
extern void fast_instruction_access_mmu_miss(void);
extern void fast_data_access_mmu_miss(void);
extern void fast_data_access_protection(void);
extern void fast_instruction_access_mmu_miss(int n, istate_t *istate);
extern void fast_data_access_mmu_miss(int n, istate_t *istate);
extern void fast_data_access_protection(int n, istate_t *istate);
 
extern void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable);
 
/trunk/kernel/arch/sparc64/include/context.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64
* @{
*/
/** @file
89,6 → 89,5
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/sparc64/src/trap/trap_table.S
43,6 → 43,7
#include <arch/trap/exception.h>
#include <arch/trap/mmu.h>
#include <arch/stack.h>
#include <arch/regdef.h>
 
#define TABLE_SIZE TRAP_TABLE_SIZE
#define ENTRY_SIZE TRAP_TABLE_ENTRY_SIZE
275,15 → 276,25
FILL_NORMAL_HANDLER
 
 
/* Preemptible trap handler.
/* Preemptible trap handler for TL=1.
*
* This trap handler makes arrangements to
* make calling scheduler() possible.
* This trap handler makes the arrangements needed to call scheduler() from
* within a trap context. It is guaranteed to function only when traps
* are not nested (i.e. for TL=1).
*
* The caller is responsible for doing save
* and allocating PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE
* bytes on stack.
* Every trap handler on TL=1 that makes a call to the scheduler needs to
* be based on this function. The reason is that nested trap levels and the
* automatic saving of the interrupted context by hardware do not work well
* together with scheduling (i.e. a thread cannot be rescheduled with TL>0).
* Therefore it is necessary to eliminate the effect of trap levels
* by software and save the necessary state on the kernel stack.
*
* Note that for traps with TL>1, more state needs to be saved. This function
* is therefore not going to work when TL>1.
*
* The caller is responsible for doing SAVE and allocating
* PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE bytes on the stack.
*
* Input registers:
* %l0 Address of function to call.
* Output registers:
299,6 → 310,11
rdpr %tnpc, %g3
rdpr %pstate, %g4
 
/*
* The following memory accesses will not fault
* because special provisions are made to have
* the kernel stack of THREAD locked in DTLB.
*/
stx %g1, [%fp + STACK_BIAS + SAVED_TSTATE]
stx %g2, [%fp + STACK_BIAS + SAVED_TPC]
stx %g3, [%fp + STACK_BIAS + SAVED_TNPC]
313,7 → 329,7
* Alter PSTATE.
* - switch to normal globals.
*/
and %g4, ~1, %g4 ! mask alternate globals
and %g4, ~(PSTATE_AG_BIT|PSTATE_IG_BIT|PSTATE_MG_BIT), %g4
wrpr %g4, 0, %pstate
/*
324,17 → 340,18
/*
* Call the higher-level handler.
*/
mov %fp, %o1 ! calculate istate address
call %l0
nop
add %o1, STACK_BIAS + SAVED_PSTATE, %o1 ! calculate istate address
/*
* Restore the normal global register set.
*/
RESTORE_GLOBALS
/*
* Restore PSTATE from saved copy.
* Alternate globals become active.
* Alternate/Interrupt/MM globals become active.
*/
ldx [%fp + STACK_BIAS + SAVED_PSTATE], %l4
wrpr %l4, 0, %pstate
357,10 → 374,10
restore
 
/*
* On execution of retry instruction, CWP will be restored from TSTATE register.
* However, because of scheduling, it is possible that CWP in saved TSTATE
* is different from current CWP. The following chunk of code fixes CWP
* in the saved copy of TSTATE.
* On execution of the RETRY instruction, CWP will be restored from the TSTATE
* register. However, because of scheduling, it is possible that CWP in the saved
* TSTATE is different from the current CWP. The following chunk of code fixes
* CWP in the saved copy of TSTATE.
*/
rdpr %cwp, %g4 ! read current CWP
and %g1, ~0x1f, %g1 ! clear CWP field in saved TSTATE
/trunk/kernel/arch/sparc64/src/mm/tlb.c
34,10 → 34,13
 
#include <arch/mm/tlb.h>
#include <mm/tlb.h>
#include <mm/as.h>
#include <mm/asid.h>
#include <arch/mm/frame.h>
#include <arch/mm/page.h>
#include <arch/mm/mmu.h>
#include <mm/asid.h>
#include <arch/interrupt.h>
#include <arch.h>
#include <print.h>
#include <arch/types.h>
#include <typedefs.h>
47,6 → 50,9
#include <arch/asm.h>
#include <symtab.h>
 
static void dtlb_pte_copy(pte_t *t);
static void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str);
 
char *context_encoding[] = {
"Primary",
"Secondary",
99,37 → 105,60
dtlb_data_in_write(data.value);
}
 
void dtlb_pte_copy(pte_t *t)
{
}
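
The dtlb_pte_copy() stub above is left empty in this revision. Purely as an illustration of what it might eventually do, here is a sketch built on the dtlb_insert_mapping() helper already declared in tlb.h (the pte_t members page, frame and c are assumptions about the generic PTE layout):

void dtlb_pte_copy(pte_t *t)
{
	/*
	 * Hypothetical sketch: insert the translation described by the
	 * software PTE into the DTLB as an unlocked 8K entry whose
	 * cacheability follows the PTE's cacheable bit.
	 */
	dtlb_insert_mapping(t->page, t->frame, PAGESIZE_8K, false, t->c);
}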
 
/** ITLB miss handler. */
void fast_instruction_access_mmu_miss(void)
void fast_instruction_access_mmu_miss(int n, istate_t *istate)
{
panic("%s\n", __FUNCTION__);
}
 
/** DTLB miss handler. */
void fast_data_access_mmu_miss(void)
/** DTLB miss handler.
*
* Note that some faults (e.g. kernel faults) have already been resolved by the
* low-level, assembly-language part of the fast_data_access_mmu_miss handler.
*/
void fast_data_access_mmu_miss(int n, istate_t *istate)
{
tlb_tag_access_reg_t tag;
uintptr_t tpc;
char *tpc_str;
uintptr_t va;
pte_t *t;
 
tag.value = dtlb_tag_access_read();
if (tag.context != ASID_KERNEL || tag.vpn == 0) {
tpc = tpc_read();
tpc_str = get_symtab_entry(tpc);
va = tag.vpn * PAGE_SIZE;
if (tag.context == ASID_KERNEL) {
if (!tag.vpn) {
/* NULL access in kernel */
do_fast_data_access_mmu_miss_fault(istate, __FUNCTION__);
}
do_fast_data_access_mmu_miss_fault(istate, "Unexpected kernel page fault.");
}
 
printf("Faulting page: %p, ASID=%d\n", tag.vpn * PAGE_SIZE, tag.context);
printf("TPC=%p, (%s)\n", tpc, tpc_str ? tpc_str : "?");
panic("%s\n", __FUNCTION__);
page_table_lock(AS, true);
t = page_mapping_find(AS, va);
if (t) {
/*
* The mapping was found in the software page hash table.
* Insert it into DTLB.
*/
dtlb_pte_copy(t);
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to the address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {
do_fast_data_access_mmu_miss_fault(istate, __FUNCTION__);
}
}
 
/*
* Identity map piece of faulting kernel address space.
*/
dtlb_insert_mapping(tag.vpn * PAGE_SIZE, tag.vpn * FRAME_SIZE, PAGESIZE_8K, false, true);
}
 
/** DTLB protection fault handler. */
void fast_data_access_protection(void)
void fast_data_access_protection(int n, istate_t *istate)
{
panic("%s\n", __FUNCTION__);
}
161,6 → 190,20
 
}
 
void do_fast_data_access_mmu_miss_fault(istate_t *istate, const char *str)
{
tlb_tag_access_reg_t tag;
uintptr_t va;
char *tpc_str = get_symtab_entry(istate->tpc);
 
tag.value = dtlb_tag_access_read();
va = tag.vpn * PAGE_SIZE;
 
printf("Faulting page: %p, ASID=%d\n", va, tag.context);
printf("TPC=%p, (%s)\n", istate->tpc, tpc_str);
panic("%s\n", str);
}
 
/** Invalidate all unlocked ITLB and DTLB entries. */
void tlb_invalidate_all(void)
{
/trunk/kernel/arch/ia64/src/mm/tlb.c
506,7 → 506,7
t = page_mapping_find(AS, va);
if (t) {
/*
* The mapping was found in software page hash table.
* The mapping was found in the software page hash table.
* Insert it into data translation cache.
*/
dtc_pte_copy(t);
513,7 → 513,7
page_table_unlock(AS, true);
} else {
/*
* Forward the page fault to address space page fault handler.
* Forward the page fault to the address space page fault handler.
*/
page_table_unlock(AS, true);
if (as_page_fault(va, PF_ACCESS_READ, istate) == AS_PF_FAULT) {