Subversion Repositories HelenOS

Compare Revisions

Rev 413 → Rev 414

/SPARTAN/trunk/include/context.h
31,7 → 31,9
 
#include <arch/types.h>
#include <typedefs.h>
#include <arch/context.h>
 
 
#ifndef context_set
#define context_set(c, _pc, stack, size) \
(c)->pc = (__address) (_pc); \
38,7 → 40,45
(c)->sp = ((__address) (stack)) + (size) - SP_DELTA;
#endif /* context_set */
 
extern int context_save(context_t *c);
extern void context_restore(context_t *c) __attribute__ ((noreturn));
extern int context_save_arch(context_t *c);
extern void context_restore_arch(context_t *c) __attribute__ ((noreturn));
 
/** Save register context.
*
* Save the current register context (including stack pointers)
* to a context structure.
*
* Note that a call to context_restore() will return at the same
* address as the corresponding call to context_save().
*
* @param c Context structure.
*
* @return context_save() returns 1, context_restore() returns 0.
*/
static inline int context_save(context_t *c)
{
return context_save_arch(c);
}
 
/** Restore register context.
*
* Restore a previously saved register context (including stack
* pointers) from a context structure.
*
* Note that this function does not normally return.
* Instead, it returns at the same address as the
* corresponding call to context_save(), the only
* difference being the return value.
*
* Note that the content of any local variable defined
* by the caller of context_save() is undefined after
* context_restore().
*
* @param c Context structure.
*/
static inline void context_restore(context_t *c)
{
context_restore_arch(c);
}
 
#endif
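
Taken together, the generic API above behaves like a one-shot setjmp/longjmp: context_save() captures the preserved registers and returns 1, context_set() may then redirect the saved pc and sp, and context_restore() resumes at the saved point, returning 0 there if the context was not redirected. The call sites changed below all use it to hop onto a private stack. A minimal sketch of that pattern, showing the explicit (__address) cast this revision adds at every call site; my_stack, separated_stack and switch_to_private_stack are hypothetical names and kernel includes are omitted:

static context_t ctx;
static __u8 my_stack[1024];                      /* hypothetical private stack */

static void separated_stack(void)
{
        /* runs on my_stack in a fresh frame; must never return */
        for (;;)
                ;
}

void switch_to_private_stack(void)
{
        context_save(&ctx);                      /* capture preserved registers */
        context_set(&ctx, FADDR(separated_stack),
                    (__address) my_stack, sizeof(my_stack));
        context_restore(&ctx);                   /* jumps to separated_stack() */
        /* not reached */
}
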
/SPARTAN/trunk/src/proc/scheduler.c
313,7 → 313,7
* scheduler_separated_stack().
*/
context_save(&CPU->saved_context);
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), CPU->stack, CPU_STACK_SIZE);
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
context_restore(&CPU->saved_context);
/* not reached */
}
/SPARTAN/trunk/src/proc/thread.c
195,7 → 195,7
t->ustack = (__u8 *) frame_us;
context_save(&t->saved_context);
context_set(&t->saved_context, FADDR(cushion), t->kstack, THREAD_STACK_SIZE);
context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);
the_initialize((the_t *) t->kstack);
 
/SPARTAN/trunk/src/main/main.c
246,7 → 246,7
* collide with another CPU coming up. To prevent this, we
* switch to this CPU's private stack prior to waking kmp up.
*/
context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), CPU->stack, CPU_STACK_SIZE);
context_set(&CPU->saved_context, FADDR(main_ap_separated_stack), (__address) CPU->stack, CPU_STACK_SIZE);
context_restore(&CPU->saved_context);
/* not reached */
}
/SPARTAN/trunk/src/synch/waitq.c
30,6 → 30,7
#include <synch/synch.h>
#include <synch/spinlock.h>
#include <proc/thread.h>
#include <proc/scheduler.h>
#include <arch/asm.h>
#include <arch/types.h>
#include <time/timeout.h>
/SPARTAN/trunk/tools/amd64/gencontext.c
3,6 → 3,7
 
typedef long long __u64;
typedef __u64 ipl_t;
typedef __u64 __address;
 
#define __amd64_TYPES_H__
#include "../../arch/amd64/include/context.h"
/SPARTAN/trunk/tools/mips32/gencontext.c
1,8 → 1,8
#include <stdio.h>
 
 
typedef unsigned int __u32;
typedef __u32 ipl_t;
typedef __u32 __address;
 
#define __mips32_TYPES_H__
#include "../../arch/mips32/include/context.h"
/SPARTAN/trunk/arch/ia64/include/context.h
30,6 → 30,7
#define __ia64_CONTEXT_H__
 
#include <arch/types.h>
#include <typedefs.h>
#include <align.h>
 
#define STACK_ITEM_SIZE 16
50,7 → 51,7
#define context_set(c, _pc, stack, size) \
(c)->pc = (__address) _pc; \
(c)->bsp = ((__address) stack) + ALIGN(sizeof(the_t), STACK_ALIGNMENT); \
(c)->sp = ((__address) stack) + ALIGN((size) - SP_DELTA, STACK_ALIGNMENT);
(c)->sp = ((__address) stack) + ALIGN((size), STACK_ALIGNMENT) - SP_DELTA;
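
The reordered sp expression above differs from the old one only when SP_DELTA is not a multiple of STACK_ALIGNMENT; in that case the old form can round the SP_DELTA reserve away, while the new form aligns the stack top first and then subtracts. A small host-side sketch, assuming ALIGN(x, a) rounds x up to the next multiple of a; the values are illustrative, not the actual ia64 constants:

#include <stdio.h>

/* assumed round-up semantics for ALIGN */
#define ALIGN(x, a)  ((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
        unsigned long size = 8192, align = 16, delta = 8;    /* illustrative only */

        unsigned long old_sp = ALIGN(size - delta, align);   /* 8192: reserve rounded away */
        unsigned long new_sp = ALIGN(size, align) - delta;   /* 8184: delta bytes stay reserved */

        printf("old sp offset: %lu, new sp offset: %lu\n", old_sp, new_sp);
        return 0;
}
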
 
/*
* Only save registers that must be preserved across
65,7 → 66,7
__u64 ar_unat_caller;
__u64 ar_unat_callee;
__u64 ar_rsc;
__u64 bsp; /* ar_bsp */
__address bsp; /* ar_bsp */
__u64 ar_rnat;
__u64 ar_lc;
 
77,13 → 78,13
__u64 r5;
__u64 r6;
__u64 r7;
__u64 sp; /* r12 */
__address sp; /* r12 */
__u64 r13;
/*
* Branch registers
*/
__u64 pc; /* b0 */
__address pc; /* b0 */
__u64 b1;
__u64 b2;
__u64 b3;
/SPARTAN/trunk/arch/ia64/src/context.S
28,10 → 28,10
 
.text
 
.global context_save
.global context_restore
.global context_save_arch
.global context_restore_arch
 
context_save:
context_save_arch:
alloc loc0 = ar.pfs, 1, 8, 0, 0
mov loc1 = ar.unat ;;
/* loc2 */
112,7 → 112,7
add r8 = r0, r0, 1 /* context_save returns 1 */
br.ret.sptk.many b0
 
context_restore:
context_restore_arch:
alloc loc0 = ar.pfs, 1, 8, 0, 0 ;;
 
ld8 loc0 = [in0], 8 ;; /* load ar.pfs */
/SPARTAN/trunk/arch/ppc32/include/context.h
35,7 → 35,7
 
struct context {
__u32 r0;
__u32 sp;
__address sp;
__u32 r2;
__u32 r3;
__u32 r4;
66,7 → 66,7
__u32 r29;
__u32 r30;
__u32 r31;
__u32 pc;
__address pc;
ipl_t ipl;
} __attribute__ ((packed));
 
/SPARTAN/trunk/arch/ppc32/src/context.S
30,10 → 30,10
 
.text
 
.global context_save
.global context_restore
.global context_save_arch
.global context_restore_arch
 
context_save:
context_save_arch:
REGISTERS_STORE r3
mflr r3
43,7 → 43,7
li r3, 1
blr
context_restore:
context_restore_arch:
REGISTERS_LOAD r3
lwz r3, 128(r3)
/SPARTAN/trunk/arch/amd64/include/context.h
44,8 → 44,8
* during function call
*/
struct context {
__u64 sp;
__u64 pc;
__address sp;
__address pc;
__u64 rbx;
__u64 rbp;
/SPARTAN/trunk/arch/amd64/src/context.S
28,17 → 28,17
 
.text
 
.global context_save
.global context_restore
.global context_save_arch
.global context_restore_arch
 
#include <arch/context_offset.h>
 
## Save current CPU context
#
# Save CPU context to the kernel_context variable
# Save CPU context to the context_t variable
# pointed to by the 1st argument. Returns 1 in EAX.
#
context_save:
context_save_arch:
movq (%rsp), %rdx # the caller's return %eip
# In %edi is passed 1st argument
movq %rdx, OFFSET_PC(%rdi)
58,10 → 58,10
 
## Restore current CPU context
#
# Restore CPU context from the kernel_context variable
# Restore CPU context from the context_t variable
# pointed to by the 1st argument. Returns 0 in EAX.
#
context_restore:
context_restore_arch:
movq OFFSET_R15(%rdi), %r15
movq OFFSET_R14(%rdi), %r14
movq OFFSET_R13(%rdi), %r13
/SPARTAN/trunk/arch/mips32/include/context.h
48,8 → 48,8
* function calls.
*/
struct context {
__u32 sp;
__u32 pc;
__address sp;
__address pc;
__u32 s0;
__u32 s1;
/SPARTAN/trunk/arch/mips32/src/context.S
36,8 → 36,8
.set noreorder
.set nomacro
 
.global context_save
.global context_restore
.global context_save_arch
.global context_restore_arch
 
.macro CONTEXT_STORE r
sw $s0,OFFSET_S0(\r)
72,7 → 72,7
.endm
 
context_save:
context_save_arch:
CONTEXT_STORE $a0
 
# context_save returns 1
79,7 → 79,7
j $31
li $2, 1
context_restore:
context_restore_arch:
CONTEXT_LOAD $a0
 
# context_restore returns 0
/SPARTAN/trunk/arch/ia32/include/context.h
46,8 → 46,8
* function calls.
*/
struct context {
__u32 sp;
__u32 pc;
__address sp;
__address pc;
__u32 ebx;
__u32 esi;
__u32 edi;
/SPARTAN/trunk/arch/ia32/src/context.s
28,16 → 28,16
 
.text
 
.global context_save
.global context_restore
.global context_save_arch
.global context_restore_arch
 
 
## Save current CPU context
#
# Save CPU context to the kernel_context variable
# Save CPU context to the context_t variable
# pointed to by the 1st argument. Returns 1 in EAX.
#
context_save:
context_save_arch:
movl 0(%esp),%eax # the caller's return %eip
movl 4(%esp),%edx # address of the kernel_context variable to save context to
 
53,12 → 53,12
ret
 
 
## Restore current CPU context
## Restore saved CPU context
#
# Restore CPU context from the kernel_context variable
# Restore CPU context from the context_t variable
# pointed to by the 1st argument. Returns 0 in EAX.
#
context_restore:
context_restore_arch:
movl 4(%esp),%eax # address of the kernel_context variable to restore context from
movl 0(%eax),%esp # ctx->sp -> %esp
movl 4(%eax),%edx # ctx->pc -> %edx