Subversion Repositories HelenOS

Compare Revisions

Rev 2048 → Rev 2049

/trunk/kernel/arch/sparc64/src/proc/scheduler.c
51,10 → 51,9
 
/** Perform sparc64 specific steps before scheduling a thread.
*
* Ensure that the thread's kernel stack, as well as the userspace window
* buffer for userspace threads, is locked in DTLB.
* For userspace threads, initialize reserved global registers
* in the alternate and interrupt sets.
* Ensure that the thread's kernel stack, as well as the userspace window
* buffer for userspace threads, is locked in DTLB. For userspace threads,
* initialize reserved global registers in the alternate and interrupt sets.
*/
void before_thread_runs_arch(void)
{
62,14 → 61,17
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1 <<
KERNEL_PAGE_WIDTH))) {
/*
* The kernel stack of this thread is not locked in DTLB.
* First, make sure it is not mapped already.
* Then create a locked mapping for it.
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
THREAD->kstack);
dtlb_insert_mapping((uintptr_t) THREAD->kstack,
KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
}
if ((THREAD->flags & THREAD_FLAG_USPACE)) {
78,21 → 80,27
* its userspace window buffer into DTLB.
*/
ASSERT(THREAD->arch.uspace_window_buffer);
uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
uintptr_t uw_buf = ALIGN_DOWN((uintptr_t)
THREAD->arch.uspace_window_buffer, PAGE_SIZE);
if (!overlaps(uw_buf, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH))
{
/*
* The buffer is not covered by the 4M locked kernel DTLB entry.
* The buffer is not covered by the 4M locked kernel
* DTLB entry.
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, uw_buf);
dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K, true, true);
dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K,
true, true);
}
/*
* Write kernel stack address to %g6 and a pointer to the last item
* in the userspace window buffer to %g7 in the alternate and interrupt sets.
* Write kernel stack address to %g6 and a pointer to the last
* item in the userspace window buffer to %g7 in the alternate
* and interrupt sets.
*/
uint64_t sp = (uintptr_t) THREAD->kstack + STACK_SIZE
- (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT));
- (STACK_BIAS + ALIGN_UP(STACK_ITEM_SIZE,
STACK_ALIGNMENT));
write_to_ig_g6(sp);
write_to_ag_g6(sp);
write_to_ag_g7((uintptr_t) THREAD->arch.uspace_window_buffer);
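The stack pointer arithmetic above reflects the SPARC V9 stack bias: stack registers hold the real address minus STACK_BIAS, so the value written to %g6 is the biased address of the stack top with one stack item reserved. A minimal sketch of the same computation, with the constants below standing in for the kernel's own definitions:

#include <stdint.h>

#define STACK_SIZE       (1 << 13)   /* assumed: one 8K page per kernel stack */
#define STACK_BIAS       2047        /* SPARC V9 stack bias */
#define STACK_ITEM_SIZE  8           /* assumed */
#define STACK_ALIGNMENT  16          /* assumed */
#define ALIGN_UP(s, a)   (((s) + ((a) - 1)) & ~((uintptr_t) (a) - 1))

static uint64_t initial_kernel_sp(uintptr_t kstack)
{
	/* unbiased top of the stack, leaving room for one aligned stack item */
	uintptr_t top = kstack + STACK_SIZE -
	    ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT);

	/* convert to the biased form kept in %g6 */
	return top - STACK_BIAS;
}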
108,14 → 116,16
{
uintptr_t base;
 
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
 
if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1 <<
KERNEL_PAGE_WIDTH))) {
/*
* The kernel stack of this thread is locked in DTLB.
* Destroy the mapping.
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
THREAD->kstack);
}
if ((THREAD->flags & THREAD_FLAG_USPACE)) {
125,8 → 135,9
*/
ASSERT(THREAD->arch.uspace_window_buffer);
uintptr_t uw_buf = ALIGN_DOWN((uintptr_t) THREAD->arch.uspace_window_buffer, PAGE_SIZE);
if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
uintptr_t uw_buf = ALIGN_DOWN((uintptr_t)
THREAD->arch.uspace_window_buffer, PAGE_SIZE);
if (!overlaps(uw_buf, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH)) {
/*
* The buffer is not covered by the 4M locked kernel DTLB entry
* and therefore it was given a dedicated locked DTLB entry.
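Both the locking and the unlocking paths above rely on the same test: the kernel is covered by a single locked 4M DTLB entry starting at the aligned config.base, so only pages falling outside that window (a thread's kernel stack, or a userspace window buffer) need a dedicated locked 8K entry of their own. A minimal sketch of that test, assuming 8K base pages, a 4M kernel page, and an interval-overlap helper equivalent to the kernel's overlaps() macro:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define KERNEL_PAGE_WIDTH  22                     /* assumed: 4M locked entry */
#define PAGE_SIZE          (1 << 13)              /* assumed: 8K base pages */
#define ALIGN_DOWN(s, a)   ((s) & ~((uintptr_t) (a) - 1))

/* stand-in for the overlaps() macro from <macros.h>: do the two
 * half-open intervals [s1, s1 + sz1) and [s2, s2 + sz2) intersect? */
static bool overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
	return (s1 < s2 + sz2) && (s2 < s1 + sz1);
}

/* true if the page needs its own locked 8K DTLB entry */
static bool needs_own_dtlb_entry(uintptr_t page, uintptr_t config_base)
{
	uintptr_t base = ALIGN_DOWN(config_base, 1 << KERNEL_PAGE_WIDTH);

	return !overlaps(page, PAGE_SIZE, base, 1 << KERNEL_PAGE_WIDTH);
}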
/trunk/kernel/arch/sparc64/src/proc/thread.c
54,13 → 54,15
* Mind the possible alignment of the userspace window buffer
* belonging to a killed thread.
*/
frame_free(KA2PA(ALIGN_DOWN((uintptr_t) t->arch.uspace_window_buffer, PAGE_SIZE)));
frame_free(KA2PA(ALIGN_DOWN((uintptr_t)
t->arch.uspace_window_buffer, PAGE_SIZE)));
}
}
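The ALIGN_DOWN() around the freed pointer above matters because the stored window buffer pointer need not be page-aligned by the time the thread dies, while the frame allocator expects the base of the frame that was originally handed out. A small sketch of that step, with the macro and page size as assumed stand-ins:

#include <stdint.h>

#define PAGE_SIZE        (1 << 13)                        /* assumed: 8K pages */
#define ALIGN_DOWN(s, a) ((s) & ~((uintptr_t) (a) - 1))   /* assumed definition */

/* recover the page base of a possibly interior window buffer pointer */
static uintptr_t window_buffer_frame(uint8_t *uspace_window_buffer)
{
	return ALIGN_DOWN((uintptr_t) uspace_window_buffer, PAGE_SIZE);
}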
 
void thread_create_arch(thread_t *t)
{
if ((t->flags & THREAD_FLAG_USPACE) && (!t->arch.uspace_window_buffer)) {
if ((t->flags & THREAD_FLAG_USPACE) && (!t->arch.uspace_window_buffer))
{
/*
* The thread needs a userspace window buffer and the object
* returned from the slab allocator doesn't have one.
73,7 → 75,8
* Mind the possible alignment of the userspace window buffer
* belonging to a killed thread.
*/
t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf, PAGE_SIZE);
t->arch.uspace_window_buffer = (uint8_t *) ALIGN_DOWN(uw_buf,
PAGE_SIZE);
}
}
 
/trunk/kernel/arch/sparc64/src/cpu/cpu.c
44,7 → 44,9
#include <arch/mm/tlb.h>
#include <macros.h>
 
/** Perform sparc64 specific initialization of the processor structure for the current processor. */
/** Perform sparc64 specific initialization of the processor structure for the
* current processor.
*/
void cpu_arch_init(void)
{
ofw_tree_node_t *node;
66,9 → 68,11
if (prop && prop->value) {
mid = *((uint32_t *) prop->value);
if (mid == CPU->arch.mid) {
prop = ofw_tree_getprop(node, "clock-frequency");
prop = ofw_tree_getprop(node,
"clock-frequency");
if (prop && prop->value)
clock_frequency = *((uint32_t *) prop->value);
clock_frequency = *((uint32_t *)
prop->value);
}
}
node = ofw_tree_find_peer_by_device_type(node, "cpu");
80,16 → 84,19
/*
* Lock CPU stack in DTLB.
*/
uintptr_t base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
uintptr_t base = ALIGN_DOWN(config.base, 1 << KERNEL_PAGE_WIDTH);
if (!overlaps((uintptr_t) CPU->stack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
if (!overlaps((uintptr_t) CPU->stack, PAGE_SIZE, base, (1 <<
KERNEL_PAGE_WIDTH))) {
/*
* The kernel stack of this processor is not locked in DTLB.
* First, demap any already existing mappings.
* Second, create a locked mapping for it.
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) CPU->stack);
dtlb_insert_mapping((uintptr_t) CPU->stack, KA2PA(CPU->stack), PAGESIZE_8K, true, true);
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t)
CPU->stack);
dtlb_insert_mapping((uintptr_t) CPU->stack, KA2PA(CPU->stack),
PAGESIZE_8K, true, true);
}
}
 
103,7 → 110,8
*
* This function is called by the bootstrap processor.
*
* @param m Processor structure of the CPU for which version information is to be printed.
* @param m Processor structure of the CPU for which version information is to
* be printed.
*/
void cpu_print_report(cpu_t *m)
{
151,8 → 159,8
break;
}
 
printf("cpu%d: manuf=%s, impl=%s, mask=%d (%dMHz)\n",
m->id, manuf, impl, m->arch.ver.mask, m->arch.clock_frequency/1000000);
printf("cpu%d: manuf=%s, impl=%s, mask=%d (%dMHz)\n", m->id, manuf,
impl, m->arch.ver.mask, m->arch.clock_frequency / 1000000);
}
 
/** @}
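The clock-frequency lookup in cpu_arch_init() above walks the peer "cpu" nodes of the OpenFirmware device tree and, for the node whose MID matches the current processor, reads its "clock-frequency" property; cpu_print_report() later divides that value by 1000000 to print MHz. A rough sketch of the walk, with simplified stand-in types and prototypes, and a hypothetical node_mid() helper in place of the MID-property read that the excerpt does not show:

#include <stdint.h>

typedef struct ofw_tree_node ofw_tree_node_t;
typedef struct {
	void *value;
} ofw_tree_property_t;                  /* simplified stand-in type */

extern ofw_tree_property_t *ofw_tree_getprop(ofw_tree_node_t *node,
    const char *name);
extern ofw_tree_node_t *ofw_tree_find_peer_by_device_type(
    ofw_tree_node_t *node, const char *device_type);
extern uint32_t node_mid(ofw_tree_node_t *node);   /* hypothetical helper */

static uint32_t find_clock_frequency(ofw_tree_node_t *node, uint32_t mid)
{
	uint32_t clock_frequency = 0;

	/* walk the peer "cpu" nodes until the one matching our MID is found */
	for (; node; node = ofw_tree_find_peer_by_device_type(node, "cpu")) {
		if (node_mid(node) != mid)
			continue;

		ofw_tree_property_t *prop =
		    ofw_tree_getprop(node, "clock-frequency");
		if (prop && prop->value)
			clock_frequency = *((uint32_t *) prop->value);
	}

	return clock_frequency;
}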
/trunk/kernel/arch/sparc64/src/start.S
77,13 → 77,18
*/
 
wrpr %g0, NWINDOWS - 2, %cansave ! set maximum saveable windows
wrpr %g0, 0, %canrestore ! get rid of windows we will never need again
wrpr %g0, 0, %otherwin ! make sure the window state is consistent
wrpr %g0, NWINDOWS - 1, %cleanwin ! prevent needless clean_window traps for kernel
wrpr %g0, 0, %canrestore ! get rid of windows we will
! never need again
wrpr %g0, 0, %otherwin ! make sure the window state is
! consistent
wrpr %g0, NWINDOWS - 1, %cleanwin ! prevent needless clean_window
! traps for kernel
 
wrpr %g0, 0, %tl ! TL = 0, primary context register is used
wrpr %g0, 0, %tl ! TL = 0, primary context
! register is used
 
wrpr %g0, PSTATE_PRIV_BIT, %pstate ! Disable interrupts and disable 32-bit address masking.
wrpr %g0, PSTATE_PRIV_BIT, %pstate ! disable interrupts and disable
! 32-bit address masking
 
wrpr %g0, 0, %pil ! initialize %pil
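A note on the window-state values programmed above: SPARC V9 maintains the invariant CANSAVE + CANRESTORE + OTHERWIN = NWINDOWS - 2, so with %canrestore and %otherwin cleared, NWINDOWS - 2 is the largest value %cansave may legally hold, while %cleanwin is raised to its maximum so the kernel avoids needless clean_window traps. A throwaway check of the arithmetic (NWINDOWS = 8 is an assumption; UltraSPARC parts commonly implement 8 windows):

#include <assert.h>

#define NWINDOWS 8   /* assumed */

int main(void)
{
	unsigned cansave = NWINDOWS - 2;    /* written to %cansave    */
	unsigned canrestore = 0;            /* written to %canrestore */
	unsigned otherwin = 0;              /* written to %otherwin   */

	/* SPARC V9 register-window invariant */
	assert(cansave + canrestore + otherwin == NWINDOWS - 2);
	return 0;
}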
 
94,20 → 99,19
wrpr %g1, %lo(trap_table), %tba
 
/*
* Take over the DMMU by installing a global locked
* TTE entry identically mapping the first 4M
* of memory.
* Take over the DMMU by installing a global locked TTE entry identically
* mapping the first 4M of memory.
*
* In the case of the DMMU, no FLUSH instructions need to be
* issued. Because of that, the old DTLB contents can
* be demapped pretty straightforwardly and without
* causing any traps.
* In the case of the DMMU, no FLUSH instructions need to be issued. Because
* of that, the old DTLB contents can be demapped pretty straightforwardly
* and without causing any traps.
*/
 
wr %g0, ASI_DMMU, %asi
 
#define SET_TLB_DEMAP_CMD(r1, context_id) \
set (TLB_DEMAP_CONTEXT<<TLB_DEMAP_TYPE_SHIFT) | (context_id<<TLB_DEMAP_CONTEXT_SHIFT), %r1
set (TLB_DEMAP_CONTEXT << TLB_DEMAP_TYPE_SHIFT) | (context_id << \
TLB_DEMAP_CONTEXT_SHIFT), %r1
! demap context 0
SET_TLB_DEMAP_CMD(g1, TLB_DEMAP_NUCLEUS)
115,7 → 119,7
membar #Sync
 
#define SET_TLB_TAG(r1, context) \
set VMA | (context<<TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
set VMA | (context << TLB_TAG_ACCESS_CONTEXT_SHIFT), %r1
 
! write DTLB tag
SET_TLB_TAG(g1, MEM_CONTEXT_KERNEL)
144,11 → 148,10
membar #Sync
 
/*
* Because we cannot use global mappings (because we want to
* have separate 64-bit address spaces for both the kernel
* and userspace), we also prepare the identity mapping in
* context 1. This step is required by the
* code installing the ITLB mapping.
* Because we cannot use global mappings (because we want to have
* separate 64-bit address spaces for both the kernel and userspace),
* we also prepare the identity mapping in context 1. This step is
* required by the code installing the ITLB mapping.
*/
! write DTLB tag of context 1 (i.e. MEM_CONTEXT_TEMP)
SET_TLB_TAG(g1, MEM_CONTEXT_TEMP)
161,25 → 164,23
membar #Sync
/*
* Now it is time to take over the IMMU.
* Unfortunately, it cannot be done as easily as the DMMU,
* because the IMMU is mapping the code it executes.
* Now it is time to take over the IMMU. Unfortunately, it cannot be done
* as easily as the DMMU, because the IMMU is mapping the code it
* executes.
*
* [ Note that brave experiments with disabling the IMMU
* and using the DMMU approach failed after a dozen
* desperate days with little success. ]
* [ Note that brave experiments with disabling the IMMU and using the
* DMMU approach failed after a dozen desperate days with little
* success. ]
*
* The approach used here is inspired by OpenBSD.
* First, the kernel creates an IMMU mapping for itself
* in context 1 (MEM_CONTEXT_TEMP) and switches to
* it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
* afterwards and replaced with the kernel permanent
* mapping. Finally, the kernel switches back to
* context 0 and demaps context 1.
* The approach used here is inspired by OpenBSD. First, the kernel
* creates an IMMU mapping for itself in context 1 (MEM_CONTEXT_TEMP) and
* switches to it. Context 0 (MEM_CONTEXT_KERNEL) can be demapped
* afterwards and replaced with the kernel permanent mapping. Finally,
* the kernel switches back to context 0 and demaps context 1.
*
* Moreover, the IMMU requires the use of the FLUSH instructions.
* But that is OK because we always use operands with
* addresses already mapped by the taken-over DTLB.
* Moreover, the IMMU requires the use of the FLUSH instructions. But
* that is OK because we always use operands with addresses already
* mapped by the taken-over DTLB.
*/
set kernel_image_start, %g5
291,9 → 292,8
 
#ifdef CONFIG_SMP
/*
* Spin loop for APs until the BSP picks them up.
* A processor cannot leave the loop until the
* global variable 'waking_up_mid' equals its
* Spin loop for APs until the BSP picks them up. A processor cannot
* leave the loop until the global variable 'waking_up_mid' equals its
* MID.
*/
set waking_up_mid, %g2
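In C terms, the wait described above is a plain spin on a shared variable; a rough equivalent (the 64-bit width and the volatile access are assumptions, not a transcription of the assembly):

#include <stdint.h>

extern volatile uint64_t waking_up_mid;   /* written by the BSP */

static void ap_wait(uint64_t my_mid)
{
	/* busy-wait: the BSP releases an AP by publishing its MID */
	while (waking_up_mid != my_mid)
		;
}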
326,15 → 326,13
.section K_DATA_START, "aw", @progbits
 
/*
* Create a small stack to be used by the bootstrap processor.
* It is going to be used only for a very limited period of
* time, but we switch to it anyway, just to be sure we are
* properly initialized.
* Create a small stack to be used by the bootstrap processor. It is going to
* be used only for a very limited period of time, but we switch to it anyway,
* just to be sure we are properly initialized.
*
* What is important is that this piece of memory is covered
* by the 4M locked DTLB entry and therefore there will be
* no surprises like deadly combinations of a spill trap and
* a TLB miss on the stack address.
* What is important is that this piece of memory is covered by the 4M locked
* DTLB entry and therefore there will be no surprises like deadly
* combinations of a spill trap and a TLB miss on the stack address.
*/
 
#define INITIAL_STACK_SIZE 1024
354,14 → 352,16
.quad 0
 
/*
* This variable is used by the fast_data_MMU_miss trap handler.
* At runtime, it is further modified to reflect the starting address of
* physical memory.
* This variable is used by the fast_data_MMU_miss trap handler. At runtime,
* it is further modified to reflect the starting address of physical memory.
*/
.global kernel_8k_tlb_data_template
kernel_8k_tlb_data_template:
#ifdef CONFIG_VIRT_IDX_DCACHE
.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | TTE_CV | TTE_P | TTE_W)
.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
TTE_CV | TTE_P | TTE_W)
#else /* CONFIG_VIRT_IDX_DCACHE */
.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | TTE_P | TTE_W)
.quad ((1 << TTE_V_SHIFT) | (PAGESIZE_8K << TTE_SIZE_SHIFT) | TTE_CP | \
TTE_P | TTE_W)
#endif /* CONFIG_VIRT_IDX_DCACHE */
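The template above carries everything but the physical address: the valid bit, the 8K size field and the cacheability/permission bits. One plausible way it is consumed, sketched here as an assumption rather than a transcription of the trap handler, is to merge a frame-aligned physical address into the otherwise zero PA field before the word is written to the TLB data-in register:

#include <stdint.h>

/* combine the prebuilt flag/size bits with a frame-aligned physical address */
static uint64_t tte_data_for_frame(uint64_t tlb_data_template, uint64_t pa)
{
	return tlb_data_template | pa;
}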