Subversion Repositories HelenOS

Compare Revisions

Ignore whitespace Rev 1854 → Rev 1855

/trunk/kernel/generic/include/macros.h
32,8 → 32,8
/** @file
*/
 
#ifndef __MACROS_H__
#define __MACROS_H__
#ifndef KERN_MACROS_H_
#define KERN_MACROS_H_
 
#include <arch/types.h>
#include <typedefs.h>
48,7 → 48,13
#define min(a,b) ((a) < (b) ? (a) : (b))
#define max(a,b) ((a) > (b) ? (a) : (b))
 
/** Return true if the intervals overlap. */
/** Return true if the intervals overlap.
*
* @param s1 Start address of the first interval.
* @param sz1 Size of the first interval.
* @param s2 Start address of the second interval.
* @param sz2 Size of the second interval.
*/
static inline int overlaps(uintptr_t s1, size_t sz1, uintptr_t s2, size_t sz2)
{
uintptr_t e1 = s1 + sz1;
/trunk/kernel/generic/src/main/kinit.c
154,7 → 154,7
 
#ifdef CONFIG_TEST
test();
printf("\nTest finished, please reboot\n");
printf("\nTest finished, please reboot.\n");
#else /* CONFIG_TEST */
 
task_t *utask;
/trunk/kernel/arch/sparc64/include/asm.h
315,6 → 315,12
__asm__ volatile ("stxa %0, [%1] %2\n" : : "r" (v), "r" (va), "i" (asi) : "memory");
}
 
/** Flush all valid register windows to memory.
 *
 * Executes the sparc64 FLUSHW instruction, which causes all valid
 * register windows other than the current one to be spilled to their
 * stack save areas (per SPARC V9). Used, for example, to force a
 * thread's still-active userspace windows into its window buffer
 * before that buffer is demapped from DTLB.
 *
 * NOTE(review): the asm has no "memory" clobber, so the compiler is
 * not told that stack memory changes — confirm callers do not rely on
 * the compiler re-reading spilled window contents.
 */
static inline void flushw(void)
{
__asm__ volatile ("flushw\n");
}
 
void cpu_halt(void);
void cpu_sleep(void);
void asm_delay_loop(uint32_t t);
/trunk/kernel/arch/sparc64/include/trap/trap_table.h
81,10 → 81,19
* The following needs to be in sync with the
* definition of the istate structure.
*/
#define PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE (STACK_WINDOW_SAVE_AREA_SIZE+(4*8))
#define PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE (STACK_WINDOW_SAVE_AREA_SIZE+(12*8))
#define SAVED_TSTATE -(1*8)
#define SAVED_TPC -(2*8)
#define SAVED_TNPC -(3*8)
#define SAVED_TNPC -(3*8) /* <-- istate_t begins here */
/* alignment gap */
#define SAVED_I0 -(5*8)
#define SAVED_I1 -(6*8)
#define SAVED_I2 -(7*8)
#define SAVED_I3 -(8*8)
#define SAVED_I4 -(9*8)
#define SAVED_I5 -(10*8)
#define SAVED_I6 -(11*8)
#define SAVED_I7 -(12*8)
 
.macro PREEMPTIBLE_HANDLER f
sethi %hi(\f), %g1
/trunk/kernel/arch/sparc64/src/proc/scheduler.c
35,10 → 35,12
#include <proc/scheduler.h>
#include <proc/thread.h>
#include <arch.h>
#include <arch/asm.h>
#include <arch/mm/tlb.h>
#include <arch/mm/page.h>
#include <config.h>
#include <align.h>
#include <macros.h>
 
/** Perform sparc64 specific tasks needed before the new task is run. */
void before_task_runs_arch(void)
45,7 → 47,11
{
}
 
/** Ensure that thread's kernel stack is locked in TLB. */
/** Perform sparc64 specific steps before scheduling a thread.
*
* Ensure that the thread's kernel stack and, in the case of userspace
* threads, the userspace window buffer are locked in DTLB.
*/
void before_thread_runs_arch(void)
{
uintptr_t base;
52,18 → 58,38
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
if ((uintptr_t) THREAD->kstack < base || (uintptr_t) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
/*
* Kernel stack of this thread is not locked in DTLB.
* First, make sure it is not mapped already.
* If not, create a locked mapping for it.
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
}
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true);
}
if ((THREAD->flags & THREAD_FLAG_USPACE)) {
/*
* If this thread executes also in userspace, we have to lock
* its userspace window buffer into DTLB.
*/
ASSERT(THREAD->arch.uspace_window_buffer);
uintptr_t uw_buf = (uintptr_t) THREAD->arch.uspace_window_buffer;
if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
/*
* The buffer is not covered by the 4M locked kernel DTLB entry.
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) uw_buf);
dtlb_insert_mapping(uw_buf, KA2PA(uw_buf), PAGESIZE_8K, true, true);
}
}
}
 
/** Unlock thread's stack from TLB, if necessary. */
/** Perform sparc64 specific steps before a thread stops running.
*
* Demap any locked DTLB entries installed by the thread (i.e. kernel stack
* and userspace window buffer).
*/
void after_thread_ran_arch(void)
{
uintptr_t base;
70,7 → 96,7
 
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH);
 
if ((uintptr_t) THREAD->kstack < base || (uintptr_t) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) {
if (!overlaps((uintptr_t) THREAD->kstack, PAGE_SIZE, base, (1<<KERNEL_PAGE_WIDTH))) {
/*
* Kernel stack of this thread is locked in DTLB.
* Destroy the mapping.
77,6 → 103,27
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack);
}
if ((THREAD->flags & THREAD_FLAG_USPACE)) {
/*
* If this thread executes also in userspace, we have to force all
* its still-active userspace windows into the userspace window buffer
* and demap the buffer from DTLB.
*/
ASSERT(THREAD->arch.uspace_window_buffer);
flushw(); /* force all userspace windows into memory */
uintptr_t uw_buf = (uintptr_t) THREAD->arch.uspace_window_buffer;
if (!overlaps(uw_buf, PAGE_SIZE, base, 1<<KERNEL_PAGE_WIDTH)) {
/*
* The buffer is not covered by the 4M locked kernel DTLB entry
* and therefore it was given a dedicated locked DTLB entry.
* Demap it.
*/
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) uw_buf);
}
}
}
 
/** @}
/trunk/kernel/arch/sparc64/src/trap/trap_table.S
412,12 → 412,33
 
/*
* Fix CWP.
* Just as a reminder, the input registers in the current window
* are the output registers of the window to which we want to
* restore. Because the fill trap fills only input and local
* registers of a window, we need to preserve those output
* registers manually.
*/
mov %fp, %g1
flushw
mov %sp, %g1
stx %i0, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0]
stx %i1, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1]
stx %i2, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2]
stx %i3, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3]
stx %i4, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4]
stx %i5, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5]
stx %i6, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6]
stx %i7, [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7]
wrpr %l0, 0, %cwp
mov %g1, %fp
mov %g1, %sp
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I0], %i0
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I1], %i1
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I2], %i2
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I3], %i3
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I4], %i4
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I5], %i5
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I6], %i6
ldx [%sp + PREEMPTIBLE_HANDLER_STACK_FRAME_SIZE + STACK_BIAS + SAVED_I7], %i7
 
/*
* OTHERWIN != 0 or fall-through from the OTHERWIN == 0 case.
*/