Subversion Repositories HelenOS-historic

Compare Revisions

Rev 915 → Rev 916 (whitespace regarded)

/kernel/trunk/arch/ia64/src/ivt.S
45,6 → 45,7
#define R_OFFS r16
#define R_HANDLER r17
#define R_RET r18
#define R_KSTACK_BSP r22 /* keep in sync with before_thread_runs_arch() */
#define R_KSTACK r23 /* keep in sync with before_thread_runs_arch() */
 
/** Heavyweight interrupt handler
56,8 → 57,8
* This goal is achieved by using procedure calls after RSE becomes operational.
*
* Some steps are skipped (enabling and disabling interrupts).
* Some steps are not fully supported yet (e.g. interruptions
* from userspace and floating-point context).
* Some steps are not fully supported yet (e.g. dealing with floating-point
* context).
*
* @param offs Offset from the beginning of IVT.
* @param handler Interrupt handler address.
89,26 → 90,27
mov r30 = cr.ipsr
shr.u r31 = r12, VRN_SHIFT ;;
 
shr.u r30 = r30, PSR_CPL_SHIFT ;;
and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;
 
/*
* Set p6 to true if the stack register references kernel address space.
* Set p7 to false if the stack register doesn't reference kernel address space.
* Set p3 to true if the interrupted context executed in kernel mode.
* Set p4 to false if the interrupted context didn't execute in kernel mode.
*/
cmp.eq p6, p7 = VRN_KERNEL, r31 ;;
cmp.eq p3, p4 = r30, r0 ;;
cmp.eq p1, p2 = r30, r0 ;; /* remember IPSR setting in p1 and p2 */
(p6) shr.u r30 = r30, PSR_CPL_SHIFT ;;
(p6) and r30 = PSR_CPL_MASK_SHIFTED, r30 ;;
 
/*
* Set p6 to true if the interrupted context executed in kernel mode.
* Set p7 to false if the interrupted context didn't execute in kernel mode.
* Set p3 to true if the stack register references kernel address space.
* Set p4 to false if the stack register doesn't reference kernel address space.
*/
(p6) cmp.eq p6, p7 = r30, r0 ;;
(p3) cmp.eq p3, p4 = VRN_KERNEL, r31 ;;
/*
* Now, p7 is true iff the stack needs to be switched to kernel stack.
* Now, p4 is true iff the stack needs to be switched to kernel stack.
*/
mov r30 = r12
(p7) mov r12 = R_KSTACK ;;
(p4) mov r12 = R_KSTACK ;;
add r31 = -STACK_FRAME_BIAS, r12 ;;
add r12 = -STACK_FRAME_SIZE, r12
140,13 → 142,23
mov r27 = ar.rnat
mov r28 = ar.bspstore ;;
/* assume kernel backing store */
mov ar.bspstore = r28 ;;
/*
* Inspect BSPSTORE to figure out whether it is necessary to switch to kernel BSPSTORE.
*/
(p1) shr.u r30 = r28, VRN_SHIFT ;;
(p1) cmp.eq p1, p2 = VRN_KERNEL, r30 ;;
/*
* If BSPSTORE needs to be switched, p1 is false and p2 is true.
*/
(p1) mov r30 = r28
(p2) mov r30 = R_KSTACK_BSP ;;
(p2) mov ar.bspstore = r30 ;;
mov r29 = ar.bsp
st8 [r31] = r27, -8 ;; /* save ar.rnat */
st8 [r31] = r28, -8 ;; /* save new value written to ar.bspstore */
st8 [r31] = r30, -8 ;; /* save new value written to ar.bspstore */
st8 [r31] = r28, -8 ;; /* save ar.bspstore */
st8 [r31] = r29, -8 /* save ar.bsp */
/kernel/trunk/arch/ia64/src/proc/scheduler.c
31,11 → 31,12
#include <arch.h>
#include <arch/register.h>
#include <arch/context.h>
#include <arch/stack.h>
#include <arch/mm/tlb.h>
#include <config.h>
#include <align.h>
 
/** Record kernel stack address in bank 0 r23 and make sure it is mapped in DTR. */
/** Prepare kernel stack pointers in bank 0 r22 and r23 and make sure the stack is mapped in DTR. */
void before_thread_runs_arch(void)
{
__address base;
51,14 → 52,18
}
/*
* Record address of kernel stack to bank 0 r23
* where it will be found after switch from userspace.
* Record address of kernel backing store to bank 0 r22.
* Record address of kernel stack to bank 0 r23.
* These values will be found there after switch from userspace.
*/
__asm__ volatile (
"bsw.0\n"
"mov r23 = %0\n"
"mov r22 = %0\n"
"mov r23 = %1\n"
"bsw.1\n"
: : "r" (&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA]));
:
: "r" (((__address) THREAD->kstack) + ALIGN_UP(sizeof(the_t), REGISTER_STACK_ALIGNMENT)),
"r" (&THREAD->kstack[THREAD_STACK_SIZE - SP_DELTA]));
}
 
void after_thread_ran_arch(void)