/branches/network/kernel/arch/amd64/include/atomic.h |
---|
41,17 → 41,17 |
static inline void atomic_inc(atomic_t *val) { |
#ifdef CONFIG_SMP |
asm volatile ("lock incq %0\n" : "=m" (val->count)); |
asm volatile ("lock incq %0\n" : "+m" (val->count)); |
#else |
asm volatile ("incq %0\n" : "=m" (val->count)); |
asm volatile ("incq %0\n" : "+m" (val->count)); |
#endif /* CONFIG_SMP */ |
} |
static inline void atomic_dec(atomic_t *val) { |
#ifdef CONFIG_SMP |
asm volatile ("lock decq %0\n" : "=m" (val->count)); |
asm volatile ("lock decq %0\n" : "+m" (val->count)); |
#else |
asm volatile ("decq %0\n" : "=m" (val->count)); |
asm volatile ("decq %0\n" : "+m" (val->count)); |
#endif /* CONFIG_SMP */ |
} |
61,7 → 61,7 |
asm volatile ( |
"lock xaddq %1, %0\n" |
: "=m" (val->count), "+r" (r) |
: "+m" (val->count), "+r" (r) |
); |
return r; |
73,14 → 73,14 |
asm volatile ( |
"lock xaddq %1, %0\n" |
: "=m" (val->count), "+r" (r) |
: "+m" (val->count), "+r" (r) |
); |
return r; |
} |
#define atomic_preinc(val) (atomic_postinc(val)+1) |
#define atomic_predec(val) (atomic_postdec(val)-1) |
#define atomic_preinc(val) (atomic_postinc(val) + 1) |
#define atomic_predec(val) (atomic_postdec(val) - 1) |
static inline uint64_t test_and_set(atomic_t *val) { |
uint64_t v; |
88,7 → 88,7 |
asm volatile ( |
"movq $1, %0\n" |
"xchgq %0, %1\n" |
: "=r" (v),"=m" (val->count) |
: "=r" (v), "+m" (val->count) |
); |
return v; |
102,20 → 102,20 |
preemption_disable(); |
asm volatile ( |
"0:;" |
"0:\n" |
#ifdef CONFIG_HT |
"pause;" |
"pause\n" |
#endif |
"mov %0, %1;" |
"testq %1, %1;" |
"jnz 0b;" /* Lightweight looping on locked spinlock */ |
"mov %0, %1\n" |
"testq %1, %1\n" |
"jnz 0b\n" /* lightweight looping on locked spinlock */ |
"incq %1;" /* now use the atomic operation */ |
"xchgq %0, %1;" |
"testq %1, %1;" |
"jnz 0b;" |
: "=m"(val->count),"=r"(tmp) |
); |
"incq %1\n" /* now use the atomic operation */ |
"xchgq %0, %1\n" |
"testq %1, %1\n" |
"jnz 0b\n" |
: "+m" (val->count), "=&r" (tmp) |
); |
/* |
* Prevent critical section code from bleeding out this way up. |
*/ |
/branches/network/kernel/arch/amd64/include/mm/page.h |
---|
52,8 → 52,6 |
#define PAGE_WIDTH FRAME_WIDTH |
#define PAGE_SIZE FRAME_SIZE |
#define PAGE_COLOR_BITS 0 /* dummy */ |
#ifdef KERNEL |
#ifndef __ASM__ |
/branches/network/kernel/arch/amd64/include/memstr.h |
---|
35,110 → 35,13 |
#ifndef KERN_amd64_MEMSTR_H_ |
#define KERN_amd64_MEMSTR_H_ |
/** Copy memory |
* |
* Copy a given number of bytes (3rd argument) |
* from the memory location defined by 2nd argument |
* to the memory location defined by 1st argument. |
* The memory areas cannot overlap. |
* |
* @param dst Destination |
* @param src Source |
* @param cnt Number of bytes |
* @return Destination |
*/ |
static inline void * memcpy(void * dst, const void * src, size_t cnt) |
{ |
unative_t d0, d1, d2; |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
asm volatile( |
"rep movsq\n\t" |
"movq %4, %%rcx\n\t" |
"andq $7, %%rcx\n\t" |
"jz 1f\n\t" |
"rep movsb\n\t" |
"1:\n" |
: "=&c" (d0), "=&D" (d1), "=&S" (d2) |
: "0" ((unative_t)(cnt / 8)), "g" ((unative_t)cnt), "1" ((unative_t) dst), "2" ((unative_t) src) |
: "memory"); |
extern void memsetw(void *dst, size_t cnt, uint16_t x); |
extern void memsetb(void *dst, size_t cnt, uint8_t x); |
return dst; |
} |
extern int memcmp(const void *a, const void *b, size_t cnt); |
/** Compare memory regions for equality |
* |
* Compare a given number of bytes (3rd argument) |
* at memory locations defined by 1st and 2nd argument |
* for equality. If bytes are equal function returns 0. |
* |
* @param src Region 1 |
* @param dst Region 2 |
* @param cnt Number of bytes |
* @return Zero if bytes are equal, non-zero otherwise |
*/ |
static inline int memcmp(const void * src, const void * dst, size_t cnt) |
{ |
unative_t d0, d1, d2; |
unative_t ret; |
asm ( |
"repe cmpsb\n\t" |
"je 1f\n\t" |
"movq %3, %0\n\t" |
"addq $1, %0\n\t" |
"1:\n" |
: "=a" (ret), "=%S" (d0), "=&D" (d1), "=&c" (d2) |
: "0" (0), "1" (src), "2" (dst), "3" ((unative_t)cnt) |
); |
return ret; |
} |
/** Fill memory with words |
* Fill a given number of words (2nd argument) |
* at memory defined by 1st argument with the |
* word value defined by 3rd argument. |
* |
* @param dst Destination |
* @param cnt Number of words |
* @param x Value to fill |
*/ |
static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x) |
{ |
unative_t d0, d1; |
asm volatile ( |
"rep stosw\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" ((unative_t)cnt), "2" (x) |
: "memory" |
); |
} |
/** Fill memory with bytes |
* Fill a given number of bytes (2nd argument) |
* at memory defined by 1st argument with the |
 * byte value defined by 3rd argument. |
* |
* @param dst Destination |
* @param cnt Number of bytes |
* @param x Value to fill |
*/ |
static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x) |
{ |
unative_t d0, d1; |
asm volatile ( |
"rep stosb\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" ((unative_t)cnt), "2" (x) |
: "memory" |
); |
} |
#endif |
/** @} |
/branches/network/kernel/arch/amd64/include/types.h |
---|
35,10 → 35,6 |
#ifndef KERN_amd64_TYPES_H_ |
#define KERN_amd64_TYPES_H_ |
#define NULL 0 |
#define false 0 |
#define true 1 |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed int int32_t; |
61,14 → 57,31 |
typedef uint64_t unative_t; |
typedef int64_t native_t; |
typedef uint8_t bool; |
typedef uint64_t thread_id_t; |
typedef uint64_t task_id_t; |
typedef uint32_t context_id_t; |
/**< Formats for uintptr_t, size_t, count_t and index_t */ |
#define PRIp "llx" |
#define PRIs "llu" |
#define PRIc "llu" |
#define PRIi "llu" |
typedef int32_t inr_t; |
typedef int32_t devno_t; |
/**< Formats for (u)int8_t, (u)int16_t, (u)int32_t, (u)int64_t and (u)native_t */ |
#define PRId8 "d" |
#define PRId16 "d" |
#define PRId32 "d" |
#define PRId64 "lld" |
#define PRIdn "lld" |
#define PRIu8 "u" |
#define PRIu16 "u" |
#define PRIu32 "u" |
#define PRIu64 "llu" |
#define PRIun "llu" |
#define PRIx8 "x" |
#define PRIx16 "x" |
#define PRIx32 "x" |
#define PRIx64 "llx" |
#define PRIxn "llx" |
/** Page Table Entry. */ |
typedef struct { |
unsigned present : 1; |
/branches/network/kernel/arch/amd64/include/byteorder.h |
---|
35,15 → 35,9 |
#ifndef KERN_amd64_BYTEORDER_H_ |
#define KERN_amd64_BYTEORDER_H_ |
#include <byteorder.h> |
/* AMD64 is little-endian */ |
#define uint32_t_le2host(n) (n) |
#define uint64_t_le2host(n) (n) |
#define ARCH_IS_LITTLE_ENDIAN |
#define uint32_t_be2host(n) uint32_t_byteorder_swap(n) |
#define uint64_t_be2host(n) uint64_t_byteorder_swap(n) |
#endif |
/** @} |
/branches/network/kernel/arch/amd64/include/cpu.h |
---|
35,8 → 35,9 |
#ifndef KERN_amd64_CPU_H_ |
#define KERN_amd64_CPU_H_ |
#define RFLAGS_IF (1 << 9) |
#define RFLAGS_RF (1 << 16) |
#define RFLAGS_IF (1 << 9) |
#define RFLAGS_DF (1 << 10) |
#define RFLAGS_RF (1 << 16) |
#define EFER_MSR_NUM 0xc0000080 |
#define AMD_SCE_FLAG 0 |
/branches/network/kernel/arch/amd64/include/context_offset.h |
---|
37,6 → 37,43 |
#define OFFSET_R13 0x28 |
#define OFFSET_R14 0x30 |
#define OFFSET_R15 0x38 |
#define OFFSET_IPL 0x40 |
#ifdef KERNEL |
# define OFFSET_IPL 0x40 |
#else |
# define OFFSET_TLS 0x40 |
#endif |
#ifdef __ASM__ |
# ctx: address of the structure with saved context |
# pc: return address |
.macro CONTEXT_SAVE_ARCH_CORE ctx:req pc:req |
movq \pc, OFFSET_PC(\ctx) |
movq %rsp, OFFSET_SP(\ctx) |
movq %rbx, OFFSET_RBX(\ctx) |
movq %rbp, OFFSET_RBP(\ctx) |
movq %r12, OFFSET_R12(\ctx) |
movq %r13, OFFSET_R13(\ctx) |
movq %r14, OFFSET_R14(\ctx) |
movq %r15, OFFSET_R15(\ctx) |
.endm |
# ctx: address of the structure with saved context |
.macro CONTEXT_RESTORE_ARCH_CORE ctx:req pc:req |
movq OFFSET_R15(\ctx), %r15 |
movq OFFSET_R14(\ctx), %r14 |
movq OFFSET_R13(\ctx), %r13 |
movq OFFSET_R12(\ctx), %r12 |
movq OFFSET_RBP(\ctx), %rbp |
movq OFFSET_RBX(\ctx), %rbx |
movq OFFSET_SP(\ctx), %rsp # ctx->sp -> %rsp |
movq OFFSET_PC(\ctx), \pc |
.endm |
#endif |
#endif |
/branches/network/kernel/arch/amd64/include/context.h |
---|
35,6 → 35,8 |
#ifndef KERN_amd64_CONTEXT_H_ |
#define KERN_amd64_CONTEXT_H_ |
#ifdef KERNEL |
#include <arch/types.h> |
/* According to ABI the stack MUST be aligned on |
43,6 → 45,8 |
*/ |
#define SP_DELTA 16 |
#endif /* KERNEL */ |
/* We include only registers that must be preserved |
* during function call |
*/ |
/branches/network/kernel/arch/amd64/Makefile.inc |
---|
29,12 → 29,17 |
## Toolchain configuration |
# |
ifndef CROSS_PREFIX |
CROSS_PREFIX = /usr/local |
endif |
BFD_NAME = elf64-x86-64 |
BFD_ARCH = i386:x86-64 |
BFD = binary |
TARGET = amd64-linux-gnu |
TOOLCHAIN_DIR = /usr/local/amd64 |
TOOLCHAIN_DIR = $(CROSS_PREFIX)/amd64 |
FPU_NO_CFLAGS = -mno-sse -mno-sse2 |
CMN1 = -m64 -mcmodel=kernel -mno-red-zone -fno-unwind-tables |
GCC_CFLAGS += $(CMN1) |
ICC_CFLAGS += $(CMN1) |
/branches/network/kernel/arch/amd64/src/asm_utils.S |
---|
65,6 → 65,8 |
.global get_cycle |
.global read_efer_flag |
.global set_efer_flag |
.global memsetb |
.global memsetw |
.global memcpy |
.global memcpy_from_uspace |
.global memcpy_to_uspace |
71,6 → 73,14 |
.global memcpy_from_uspace_failover_address |
.global memcpy_to_uspace_failover_address |
# Wrapper for generic memsetb |
memsetb: |
jmp _memsetb |
# Wrapper for generic memsetw |
memsetw: |
jmp _memsetw |
#define MEMCPY_DST %rdi |
#define MEMCPY_SRC %rsi |
#define MEMCPY_SIZE %rdx |
88,12 → 98,12 |
* @param MEMCPY_SRC Source address. |
* @param MEMCPY_SIZE Number of bytes to copy. |
* |
* @retrun MEMCPY_SRC on success, 0 on failure. |
 * @return MEMCPY_DST on success, 0 on failure. |
*/ |
memcpy: |
memcpy_from_uspace: |
memcpy_to_uspace: |
movq MEMCPY_SRC, %rax |
movq MEMCPY_DST, %rax |
movq MEMCPY_SIZE, %rcx |
shrq $3, %rcx /* size / 8 */ |
248,6 → 258,7 |
.endif |
save_all_gpr |
cld |
movq $(\i), %rdi # %rdi - first parameter |
movq %rsp, %rsi # %rsi - pointer to istate |
/branches/network/kernel/arch/amd64/src/userspace.c |
---|
61,6 → 61,8 |
"pushq %3\n" |
"pushq %4\n" |
"movq %5, %%rax\n" |
/* %rdi is defined to hold pcb_ptr - set it to 0 */ |
"xorq %%rdi, %%rdi\n" |
"iretq\n" |
: : |
"i" (gdtselector(UDATA_DES) | PL_USER), |
/branches/network/kernel/arch/amd64/src/debugger.c |
---|
106,25 → 106,33 |
{ |
unsigned int i; |
char *symbol; |
#ifdef __32_BITS__ |
printf("# Count Address In symbol\n"); |
printf("-- ----- ---------- ---------\n"); |
#endif |
#ifdef __64_BITS__ |
printf("# Count Address In symbol\n"); |
printf("-- ----- ------------------ ---------\n"); |
#endif |
if (sizeof(void *) == 4) { |
printf("# Count Address In symbol\n"); |
printf("-- ----- ---------- ---------\n"); |
} else { |
printf("# Count Address In symbol\n"); |
printf("-- ----- ------------------ ---------\n"); |
} |
for (i = 0; i < BKPOINTS_MAX; i++) |
if (breakpoints[i].address) { |
symbol = get_symtab_entry(breakpoints[i].address); |
if (sizeof(void *) == 4) |
printf("%-2u %-5d %#10zx %s\n", i, breakpoints[i].counter, |
breakpoints[i].address, symbol); |
else |
printf("%-2u %-5d %#18zx %s\n", i, breakpoints[i].counter, |
breakpoints[i].address, symbol); |
#ifdef __32_BITS__ |
printf("%-2u %-5d %#10zx %s\n", i, |
breakpoints[i].counter, breakpoints[i].address, |
symbol); |
#endif |
#ifdef __64_BITS__ |
printf("%-2u %-5d %#18zx %s\n", i, |
breakpoints[i].counter, breakpoints[i].address, |
symbol); |
#endif |
} |
return 1; |
} |
162,19 → 170,23 |
if ((flags & BKPOINT_INSTR)) { |
; |
} else { |
if (sizeof(int) == 4) |
dr7 |= ((unative_t) 0x3) << (18 + 4*curidx); |
else /* 8 */ |
dr7 |= ((unative_t) 0x2) << (18 + 4*curidx); |
#ifdef __32_BITS__ |
dr7 |= ((unative_t) 0x3) << (18 + 4 * curidx); |
#endif |
#ifdef __64_BITS__ |
dr7 |= ((unative_t) 0x2) << (18 + 4 * curidx); |
#endif |
if ((flags & BKPOINT_WRITE)) |
dr7 |= ((unative_t) 0x1) << (16 + 4*curidx); |
dr7 |= ((unative_t) 0x1) << (16 + 4 * curidx); |
else if ((flags & BKPOINT_READ_WRITE)) |
dr7 |= ((unative_t) 0x3) << (16 + 4*curidx); |
dr7 |= ((unative_t) 0x3) << (16 + 4 * curidx); |
} |
/* Enable global breakpoint */ |
dr7 |= 0x2 << (curidx*2); |
dr7 |= 0x2 << (curidx * 2); |
write_dr7(dr7); |
246,15 → 258,15 |
if ((breakpoints[slot].flags & BKPOINT_CHECK_ZERO)) { |
if (*((unative_t *) breakpoints[slot].address) != 0) |
return; |
printf("**** Found ZERO on address %lx (slot %d) ****\n", |
breakpoints[slot].address, slot); |
printf("*** Found ZERO on address %lx (slot %d) ***\n", |
breakpoints[slot].address, slot); |
} else { |
printf("Data watchpoint - new data: %lx\n", |
*((unative_t *) breakpoints[slot].address)); |
*((unative_t *) breakpoints[slot].address)); |
} |
} |
printf("Reached breakpoint %d:%lx(%s)\n", slot, getip(istate), |
get_symtab_entry(getip(istate))); |
get_symtab_entry(getip(istate))); |
printf("***Type 'exit' to exit kconsole.\n"); |
atomic_set(&haltstate,1); |
kconsole((void *) "debug"); |
292,7 → 304,8 |
/** Remove breakpoint from table */ |
int cmd_del_breakpoint(cmd_arg_t *argv) |
{ |
if (argv->intval < 0 || argv->intval > BKPOINTS_MAX) { |
unative_t bpno = argv->intval; |
if (bpno > BKPOINTS_MAX) { |
printf("Invalid breakpoint number.\n"); |
return 0; |
} |
346,7 → 359,9 |
} |
#ifdef CONFIG_SMP |
static void debug_ipi(int n __attribute__((unused)), istate_t *istate __attribute__((unused))) |
static void |
debug_ipi(int n __attribute__((unused)), |
istate_t *istate __attribute__((unused))) |
{ |
int i; |
362,7 → 377,7 |
{ |
int i; |
for (i=0; i<BKPOINTS_MAX; i++) |
for (i = 0; i < BKPOINTS_MAX; i++) |
breakpoints[i].address = NULL; |
cmd_initialize(&bkpts_info); |
383,11 → 398,9 |
panic("could not register command %s\n", addwatchp_info.name); |
#endif |
exc_register(VECTOR_DEBUG, "debugger", |
debug_exception); |
exc_register(VECTOR_DEBUG, "debugger", debug_exception); |
#ifdef CONFIG_SMP |
exc_register(VECTOR_DEBUG_IPI, "debugger_smp", |
debug_ipi); |
exc_register(VECTOR_DEBUG_IPI, "debugger_smp", debug_ipi); |
#endif |
} |
/branches/network/kernel/arch/amd64/src/pm.c |
---|
155,7 → 155,7 |
void tss_initialize(tss_t *t) |
{ |
memsetb((uintptr_t) t, sizeof(tss_t), 0); |
memsetb(t, sizeof(tss_t), 0); |
} |
/* |
239,7 → 239,7 |
preemption_disable(); |
ipl_t ipl = interrupts_disable(); |
memsetb((uintptr_t) idt, sizeof(idt), 0); |
memsetb(idt, sizeof(idt), 0); |
idtr_load(&idtr); |
interrupts_restore(ipl); |
/branches/network/kernel/arch/amd64/src/proc/thread.c |
---|
46,7 → 46,7 |
* Kernel RSP can be precalculated at thread creation time. |
*/ |
t->arch.syscall_rsp[SYSCALL_KSTACK_RSP] = |
(uintptr_t)&t->kstack[PAGE_SIZE - sizeof(uint64_t)]; |
(uintptr_t) &t->kstack[PAGE_SIZE - sizeof(uint64_t)]; |
} |
/** @} |
/branches/network/kernel/arch/amd64/src/syscall.c |
---|
62,9 → 62,11 |
write_msr(AMD_MSR_LSTAR, (uint64_t)syscall_entry); |
/* Mask RFLAGS on syscall |
* - disable interrupts, until we exchange the stack register |
* (mask the IE bit) |
* (mask the IF bit) |
* - clear DF so that the string instructions operate in |
* the right direction |
*/ |
write_msr(AMD_MSR_SFMASK, 0x200); |
write_msr(AMD_MSR_SFMASK, RFLAGS_IF | RFLAGS_DF); |
} |
/** @} |
/branches/network/kernel/arch/amd64/src/boot/boot.S |
---|
54,6 → 54,7 |
.long multiboot_image_start |
multiboot_image_start: |
cld |
movl $START_STACK, %esp # initialize stack pointer |
lgdtl bootstrap_gdtr # initialize Global Descriptor Table register |
126,7 → 127,6 |
mov $vesa_init, %esi |
mov $VESA_INIT_SEGMENT << 4, %edi |
mov $e_vesa_init - vesa_init, %ecx |
cld |
rep movsb |
mov $VESA_INIT_SEGMENT << 4, %edi |
282,7 → 282,6 |
movq $BOOT_OFFSET, %rsi |
movq $AP_BOOT_OFFSET, %rdi |
movq $_hardcoded_unmapped_size, %rcx |
cld |
rep movsb |
#endif |
556,7 → 555,6 |
addl %eax, %edi |
movw $0x0c00, %ax # black background, light red foreground |
cld |
ploop: |
lodsb |
/branches/network/kernel/arch/amd64/src/cpu/cpu.c |
---|
124,7 → 124,8 |
void cpu_arch_init(void) |
{ |
CPU->arch.tss = tss_p; |
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((uint8_t *) CPU->arch.tss); |
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - |
((uint8_t *) CPU->arch.tss); |
CPU->fpu_owner = NULL; |
} |
139,7 → 140,9 |
/* |
* Check for AMD processor. |
*/ |
if (info.cpuid_ebx==AMD_CPUID_EBX && info.cpuid_ecx==AMD_CPUID_ECX && info.cpuid_edx==AMD_CPUID_EDX) { |
if (info.cpuid_ebx == AMD_CPUID_EBX && |
info.cpuid_ecx == AMD_CPUID_ECX && |
info.cpuid_edx == AMD_CPUID_EDX) { |
CPU->arch.vendor = VendorAMD; |
} |
146,14 → 149,16 |
/* |
* Check for Intel processor. |
*/ |
if (info.cpuid_ebx==INTEL_CPUID_EBX && info.cpuid_ecx==INTEL_CPUID_ECX && info.cpuid_edx==INTEL_CPUID_EDX) { |
if (info.cpuid_ebx == INTEL_CPUID_EBX && |
info.cpuid_ecx == INTEL_CPUID_ECX && |
info.cpuid_edx == INTEL_CPUID_EDX) { |
CPU->arch.vendor = VendorIntel; |
} |
cpuid(1, &info); |
CPU->arch.family = (info.cpuid_eax>>8)&0xf; |
CPU->arch.model = (info.cpuid_eax>>4)&0xf; |
CPU->arch.stepping = (info.cpuid_eax>>0)&0xf; |
CPU->arch.family = (info.cpuid_eax >> 8) & 0xf; |
CPU->arch.model = (info.cpuid_eax >> 4) & 0xf; |
CPU->arch.stepping = (info.cpuid_eax >> 0) & 0xf; |
} |
} |
160,8 → 165,8 |
void cpu_print_report(cpu_t* m) |
{ |
printf("cpu%d: (%s family=%d model=%d stepping=%d) %dMHz\n", |
m->id, vendor_str[m->arch.vendor], m->arch.family, m->arch.model, m->arch.stepping, |
m->frequency_mhz); |
m->id, vendor_str[m->arch.vendor], m->arch.family, m->arch.model, |
m->arch.stepping, m->frequency_mhz); |
} |
/** @} |
/branches/network/kernel/arch/amd64/src/context.S |
---|
40,17 → 40,10 |
# |
context_save_arch: |
movq (%rsp), %rdx # the caller's return %eip |
# In %edi is passed 1st argument |
movq %rdx, OFFSET_PC(%rdi) |
movq %rsp, OFFSET_SP(%rdi) |
CONTEXT_SAVE_ARCH_CORE %rdi %rdx |
movq %rbx, OFFSET_RBX(%rdi) |
movq %rbp, OFFSET_RBP(%rdi) |
movq %r12, OFFSET_R12(%rdi) |
movq %r13, OFFSET_R13(%rdi) |
movq %r14, OFFSET_R14(%rdi) |
movq %r15, OFFSET_R15(%rdi) |
xorq %rax,%rax # context_save returns 1 |
incq %rax |
ret |
62,16 → 55,9 |
# pointed by the 1st argument. Returns 0 in EAX. |
# |
context_restore_arch: |
movq OFFSET_R15(%rdi), %r15 |
movq OFFSET_R14(%rdi), %r14 |
movq OFFSET_R13(%rdi), %r13 |
movq OFFSET_R12(%rdi), %r12 |
movq OFFSET_RBP(%rdi), %rbp |
movq OFFSET_RBX(%rdi), %rbx |
movq OFFSET_SP(%rdi), %rsp # ctx->sp -> %rsp |
movq OFFSET_PC(%rdi), %rdx |
CONTEXT_RESTORE_ARCH_CORE %rdi %rdx |
movq %rdx,(%rsp) |
xorq %rax,%rax # context_restore returns 0 |
/branches/network/kernel/arch/amd64/src/mm/page.c |
---|
82,7 → 82,7 |
void page_arch_init(void) |
{ |
uintptr_t cur; |
int i; |
unsigned int i; |
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL | PAGE_WRITE; |
if (config.cpu_active == 1) { |