Subversion Repositories: HelenOS-historic

Compare Revisions: Rev 1735 → Rev 1780

/kernel/trunk/arch/amd64/include/interrupt.h
70,26 → 70,26
 
/** This is passed to interrupt handlers */
struct istate {
- __u64 rax;
- __u64 rbx;
- __u64 rcx;
- __u64 rdx;
- __u64 rsi;
- __u64 rdi;
- __u64 r8;
- __u64 r9;
- __u64 r10;
- __u64 r11;
- __u64 r12;
- __u64 r13;
- __u64 r14;
- __u64 r15;
- __u64 rbp;
- __u64 error_word;
- __u64 rip;
- __u64 cs;
- __u64 rflags;
- __u64 stack[]; /* Additional data on stack */
+ uint64_t rax;
+ uint64_t rbx;
+ uint64_t rcx;
+ uint64_t rdx;
+ uint64_t rsi;
+ uint64_t rdi;
+ uint64_t r8;
+ uint64_t r9;
+ uint64_t r10;
+ uint64_t r11;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
+ uint64_t rbp;
+ uint64_t error_word;
+ uint64_t rip;
+ uint64_t cs;
+ uint64_t rflags;
+ uint64_t stack[]; /* Additional data on stack */
};
 
/** Return true if exception happened while in userspace */
98,17 → 98,17
return !(istate->rip & 0x8000000000000000);
}
 
- static inline void istate_set_retaddr(istate_t *istate, __address retaddr)
+ static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr)
{
istate->rip = retaddr;
}
- static inline __native istate_get_pc(istate_t *istate)
+ static inline unative_t istate_get_pc(istate_t *istate)
{
return istate->rip;
}
 
- extern void (* disable_irqs_function)(__u16 irqmask);
- extern void (* enable_irqs_function)(__u16 irqmask);
+ extern void (* disable_irqs_function)(uint16_t irqmask);
+ extern void (* enable_irqs_function)(uint16_t irqmask);
extern void (* eoi_function)(void);
 
extern void print_info_errcode(int n, istate_t *istate);
120,8 → 120,8
extern void syscall(int n, istate_t *istate);
extern void tlb_shootdown_ipi(int n, istate_t *istate);
 
- extern void trap_virtual_enable_irqs(__u16 irqmask);
- extern void trap_virtual_disable_irqs(__u16 irqmask);
+ extern void trap_virtual_enable_irqs(uint16_t irqmask);
+ extern void trap_virtual_disable_irqs(uint16_t irqmask);
extern void trap_virtual_eoi(void);
/* AMD64 - specific page handler */
extern void ident_page_fault(int n, istate_t *istate);
/kernel/trunk/arch/amd64/include/byteorder.h
36,8 → 36,8
#define __amd64_BYTEORDER_H__
 
/* AMD64 is little-endian */
- #define __native_le2host(n) (n)
- #define __u64_le2host(n) (n)
+ #define unative_t_le2host(n) (n)
+ #define uint64_t_le2host(n) (n)
 
#endif
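
On amd64 these conversions are identities because the CPU is little-endian. For contrast, a big-endian port would have to byte-swap; a minimal sketch of the 64-bit case (illustrative only, not part of this change set):

static inline uint64_t bswap64(uint64_t n)
{
	/* Swap the 32-bit halves, then the 16-bit pairs, then adjacent bytes. */
	n = (n << 32) | (n >> 32);
	n = ((n & 0x0000ffff0000ffffULL) << 16) | ((n >> 16) & 0x0000ffff0000ffffULL);
	n = ((n & 0x00ff00ff00ff00ffULL) << 8) | ((n >> 8) & 0x00ff00ff00ff00ffULL);
	return n;
}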
 
/kernel/trunk/arch/amd64/include/cpuid.h
47,18 → 47,18
#include <arch/types.h>
 
struct cpu_info {
- __u32 cpuid_eax;
- __u32 cpuid_ebx;
- __u32 cpuid_ecx;
- __u32 cpuid_edx;
+ uint32_t cpuid_eax;
+ uint32_t cpuid_ebx;
+ uint32_t cpuid_ecx;
+ uint32_t cpuid_edx;
} __attribute__ ((packed));
 
extern int has_cpuid(void);
 
- extern void cpuid(__u32 cmd, cpu_info_t *info);
+ extern void cpuid(uint32_t cmd, cpu_info_t *info);
 
 
- extern __u64 rdtsc(void);
+ extern uint64_t rdtsc(void);
 
#endif /* __ASM__ */
#endif
/kernel/trunk/arch/amd64/include/types.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64
* @{
*/
/** @file
37,29 → 37,28
 
#define NULL 0
 
- typedef signed char __s8;
- typedef signed short __s16;
- typedef signed int __s32;
- typedef signed long long __s64;
+ typedef signed char int8_t;
+ typedef signed short int16_t;
+ typedef signed int int32_t;
+ typedef signed long long int64_t;
 
- typedef unsigned char __u8;
- typedef unsigned short __u16;
- typedef unsigned int __u32;
- typedef unsigned long long __u64;
+ typedef unsigned char uint8_t;
+ typedef unsigned short uint16_t;
+ typedef unsigned int uint32_t;
+ typedef unsigned long long uint64_t;
 
- typedef __u64 __address;
- typedef __u64 pfn_t;
+ typedef uint64_t uintptr_t;
+ typedef uint64_t pfn_t;
 
/* Flags of processor (return value of interrupts_disable()) */
- typedef __u64 ipl_t;
+ typedef uint64_t ipl_t;
 
- typedef __u64 __native;
- typedef __s64 __snative;
+ typedef uint64_t unative_t;
+ typedef int64_t native_t;
 
typedef struct page_specifier pte_t;
 
#endif
 
/** @}
*/
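
The typedefs above replace the old __-prefixed names with the C99 fixed-width spellings. A compile-time sanity check of the assumed widths (an illustrative sketch using the negative-array-size idiom, since this code predates C11 _Static_assert; the macro name is hypothetical):

#define TYPES_ASSERT(expr) extern int types_assert_dummy[(expr) ? 1 : -1]

TYPES_ASSERT(sizeof(uint8_t) == 1);
TYPES_ASSERT(sizeof(uint16_t) == 2);
TYPES_ASSERT(sizeof(uint32_t) == 4);
TYPES_ASSERT(sizeof(uint64_t) == 8);
TYPES_ASSERT(sizeof(uintptr_t) == sizeof(void *));	/* 8 on amd64 */
TYPES_ASSERT(sizeof(unative_t) == sizeof(uintptr_t));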
 
/kernel/trunk/arch/amd64/include/memstr.h
49,7 → 49,7
*/
static inline void * memcpy(void * dst, const void * src, size_t cnt)
{
- __native d0, d1, d2;
+ unative_t d0, d1, d2;
 
__asm__ __volatile__(
"rep movsq\n\t"
59,7 → 59,7
"rep movsb\n\t"
"1:\n"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
: "0" ((__native)(cnt / 8)), "g" ((__native)cnt), "1" ((__native) dst), "2" ((__native) src)
: "0" ((unative_t)(cnt / 8)), "g" ((unative_t)cnt), "1" ((unative_t) dst), "2" ((unative_t) src)
: "memory");
 
return dst;
79,8 → 79,8
*/
static inline int memcmp(const void * src, const void * dst, size_t cnt)
{
- __native d0, d1, d2;
- __native ret;
+ unative_t d0, d1, d2;
+ unative_t ret;
__asm__ (
"repe cmpsb\n\t"
89,7 → 89,7
"addq $1, %0\n\t"
"1:\n"
: "=a" (ret), "=%S" (d0), "=&D" (d1), "=&c" (d2)
: "0" (0), "1" (src), "2" (dst), "3" ((__native)cnt)
: "0" (0), "1" (src), "2" (dst), "3" ((unative_t)cnt)
);
return ret;
104,14 → 104,14
* @param cnt Number of words
* @param x Value to fill
*/
- static inline void memsetw(__address dst, size_t cnt, __u16 x)
+ static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x)
{
- __native d0, d1;
+ unative_t d0, d1;
__asm__ __volatile__ (
"rep stosw\n\t"
: "=&D" (d0), "=&c" (d1), "=a" (x)
: "0" (dst), "1" ((__native)cnt), "2" (x)
: "0" (dst), "1" ((unative_t)cnt), "2" (x)
: "memory"
);
 
126,14 → 126,14
* @param cnt Number of bytes
* @param x Value to fill
*/
- static inline void memsetb(__address dst, size_t cnt, __u8 x)
+ static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x)
{
- __native d0, d1;
+ unative_t d0, d1;
__asm__ __volatile__ (
"rep stosb\n\t"
: "=&D" (d0), "=&c" (d1), "=a" (x)
: "0" (dst), "1" ((__native)cnt), "2" (x)
: "0" (dst), "1" ((unative_t)cnt), "2" (x)
: "memory"
);
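
Usage sketch for the fills above (tss_initialize() in pm.c below clears the TSS with memsetb() in exactly this way; the VGA example and its address are hypothetical):

memsetb((uintptr_t) &tss, sizeof(tss_t), 0);	/* zero out a structure */
memsetw(PA2KA(0xb8000), 80 * 25, 0x0720);	/* fill VGA text memory with blanks */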
 
/kernel/trunk/arch/amd64/include/atomic.h
83,8 → 83,8
#define atomic_preinc(val) (atomic_postinc(val)+1)
#define atomic_predec(val) (atomic_postdec(val)-1)
 
- static inline __u64 test_and_set(atomic_t *val) {
- __u64 v;
+ static inline uint64_t test_and_set(atomic_t *val) {
+ uint64_t v;
__asm__ volatile (
"movq $1, %0\n"
99,7 → 99,7
/** amd64 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
- __u64 tmp;
+ uint64_t tmp;
 
preemption_disable();
__asm__ volatile (
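
test_and_set() returns the previous value of the atomic variable, so a busy-wait lock can be built directly on it. A minimal sketch (atomic_lock_arch() above is the real, preemption-aware version):

static inline void spin_until_acquired(atomic_t *val)
{
	while (test_and_set(val) != 0)
		;	/* previous value was 1: someone else holds the lock */
}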
/kernel/trunk/arch/amd64/include/pm.h
140,34 → 140,34
typedef struct idescriptor idescriptor_t;
 
struct ptr_16_64 {
- __u16 limit;
- __u64 base;
+ uint16_t limit;
+ uint64_t base;
} __attribute__ ((packed));
typedef struct ptr_16_64 ptr_16_64_t;
 
struct ptr_16_32 {
- __u16 limit;
- __u32 base;
+ uint16_t limit;
+ uint32_t base;
} __attribute__ ((packed));
typedef struct ptr_16_32 ptr_16_32_t;
 
struct tss {
- __u32 reserve1;
- __u64 rsp0;
- __u64 rsp1;
- __u64 rsp2;
- __u64 reserve2;
- __u64 ist1;
- __u64 ist2;
- __u64 ist3;
- __u64 ist4;
- __u64 ist5;
- __u64 ist6;
- __u64 ist7;
- __u64 reserve3;
- __u16 reserve4;
- __u16 iomap_base;
- __u8 iomap[TSS_IOMAP_SIZE];
+ uint32_t reserve1;
+ uint64_t rsp0;
+ uint64_t rsp1;
+ uint64_t rsp2;
+ uint64_t reserve2;
+ uint64_t ist1;
+ uint64_t ist2;
+ uint64_t ist3;
+ uint64_t ist4;
+ uint64_t ist5;
+ uint64_t ist6;
+ uint64_t ist7;
+ uint64_t reserve3;
+ uint16_t reserve4;
+ uint16_t iomap_base;
+ uint8_t iomap[TSS_IOMAP_SIZE];
} __attribute__ ((packed));
typedef struct tss tss_t;
 
182,11 → 182,11
 
extern void pm_init(void);
 
- extern void gdt_tss_setbase(descriptor_t *d, __address base);
- extern void gdt_tss_setlimit(descriptor_t *d, __u32 limit);
+ extern void gdt_tss_setbase(descriptor_t *d, uintptr_t base);
+ extern void gdt_tss_setlimit(descriptor_t *d, uint32_t limit);
 
extern void idt_init(void);
- extern void idt_setoffset(idescriptor_t *d, __address offset);
+ extern void idt_setoffset(idescriptor_t *d, uintptr_t offset);
 
extern void tss_initialize(tss_t *t);
 
/kernel/trunk/arch/amd64/include/proc/thread.h
38,7 → 38,7
#include <arch/types.h>
 
typedef struct {
- __native tls;
+ unative_t tls;
} thread_arch_t;
 
#endif
/kernel/trunk/arch/amd64/include/asm.h
39,8 → 39,8
#include <arch/types.h>
#include <config.h>
 
- extern void asm_delay_loop(__u32 t);
- extern void asm_fake_loop(__u32 t);
+ extern void asm_delay_loop(uint32_t t);
+ extern void asm_fake_loop(uint32_t t);
 
/** Return base address of current stack.
*
48,11 → 48,11
* The stack is assumed to be STACK_SIZE bytes long.
* The stack must start on page boundary.
*/
- static inline __address get_stack_base(void)
+ static inline uintptr_t get_stack_base(void)
{
- __address v;
+ uintptr_t v;
- __asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((__u64)STACK_SIZE-1)));
+ __asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1)));
return v;
}
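
A worked instance of the masking above, assuming STACK_SIZE == 8192 (0x2000; the actual value comes from config):

/* mask = ~((uint64_t) 0x2000 - 1) = 0xffffffffffffe000
 * rsp  = 0xffff800000123abc
 * base = rsp & mask = 0xffff800000122000
 */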
68,7 → 68,7
* @param port Port to read from
* @return Value read
*/
- static inline __u8 inb(__u16 port) { __u8 val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; }
+ static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; }
 
/** Byte to port
*
77,7 → 77,7
* @param port Port to write to
* @param val Value to write
*/
- static inline void outb(__u16 port, __u8 val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); }
+ static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); }
 
/** Swap Hidden part of GS register with visible one */
static inline void swapgs(void) { __asm__ volatile("swapgs"); }
149,23 → 149,23
}
 
/** Write to MSR */
- static inline void write_msr(__u32 msr, __u64 value)
+ static inline void write_msr(uint32_t msr, uint64_t value)
{
__asm__ volatile (
"wrmsr;" : : "c" (msr),
"a" ((__u32)(value)),
"d" ((__u32)(value >> 32))
"a" ((uint32_t)(value)),
"d" ((uint32_t)(value >> 32))
);
}
 
- static inline __native read_msr(__u32 msr)
+ static inline unative_t read_msr(uint32_t msr)
{
- __u32 ax, dx;
+ uint32_t ax, dx;
 
__asm__ volatile (
"rdmsr;" : "=a"(ax), "=d"(dx) : "c" (msr)
);
- return ((__u64)dx << 32) | ax;
+ return ((uint64_t)dx << 32) | ax;
}
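
A round-trip sketch with the two helpers above; AMD_MSR_FS names the FS.base MSR (0xc0000100) that sys_tls_set() writes later in this change set, and tls here is a hypothetical thread pointer:

write_msr(AMD_MSR_FS, (uint64_t) tls);		/* value is split into EDX:EAX inside */
unative_t fsbase = read_msr(AMD_MSR_FS);	/* reassembled from EDX:EAX */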
 
 
187,9 → 187,9
);
}
 
- static inline __address * get_ip()
+ static inline uintptr_t * get_ip()
{
- __address *ip;
+ uintptr_t *ip;
 
__asm__ volatile (
"mov %%rip, %0"
202,9 → 202,9
*
* @param addr Address on a page whose TLB entry is to be invalidated.
*/
- static inline void invlpg(__address addr)
+ static inline void invlpg(uintptr_t addr)
{
__asm__ volatile ("invlpg %0\n" :: "m" (*((__native *)addr)));
__asm__ volatile ("invlpg %0\n" :: "m" (*((unative_t *)addr)));
}
 
/** Load GDTR register from memory.
238,19 → 238,19
*
* @param sel Selector specifying descriptor of TSS segment.
*/
- static inline void tr_load(__u16 sel)
+ static inline void tr_load(uint16_t sel)
{
__asm__ volatile ("ltr %0" : : "r" (sel));
}
 
- #define GEN_READ_REG(reg) static inline __native read_ ##reg (void) \
+ #define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \
{ \
- __native res; \
+ unative_t res; \
__asm__ volatile ("movq %%" #reg ", %0" : "=r" (res) ); \
return res; \
}
 
- #define GEN_WRITE_REG(reg) static inline void write_ ##reg (__native regn) \
+ #define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \
{ \
__asm__ volatile ("movq %0, %%" #reg : : "r" (regn)); \
}
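
For reference, GEN_READ_REG(cr2) expands to the read_cr2() helper that the page-fault code in mm/page.c relies on:

static inline unative_t read_cr2(void)
{
	unative_t res;
	__asm__ volatile ("movq %%cr2, %0" : "=r" (res));
	return res;
}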
/kernel/trunk/arch/amd64/include/faddr.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64
* @{
*/
/** @file
37,10 → 37,9
 
#include <arch/types.h>
 
- #define FADDR(fptr) ((__address) (fptr))
+ #define FADDR(fptr) ((uintptr_t) (fptr))
 
#endif
 
/** @}
*/
 
/kernel/trunk/arch/amd64/include/mm/frame.h
44,7 → 44,7
 
 
#ifndef __ASM__
- extern __address last_frame;
+ extern uintptr_t last_frame;
extern void frame_arch_init(void);
#endif /* __ASM__ */
 
/kernel/trunk/arch/amd64/include/mm/page.h
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64mm
* @{
*/
/** @file
60,7 → 60,7
#endif
 
#ifndef __ASM__
- static inline __address ka2pa(__address x)
+ static inline uintptr_t ka2pa(uintptr_t x)
{
if (x > 0xffffffff80000000)
return x - 0xffffffff80000000;
67,9 → 67,9
else
return x - 0xffff800000000000;
}
- # define KA2PA(x) ka2pa((__address)x)
- # define PA2KA_CODE(x) (((__address) (x)) + 0xffffffff80000000)
- # define PA2KA(x) (((__address) (x)) + 0xffff800000000000)
+ # define KA2PA(x) ka2pa((uintptr_t)x)
+ # define PA2KA_CODE(x) (((uintptr_t) (x)) + 0xffffffff80000000)
+ # define PA2KA(x) (((uintptr_t) (x)) + 0xffff800000000000)
#else
# define KA2PA(x) ((x) - 0xffffffff80000000)
# define PA2KA(x) ((x) + 0xffffffff80000000)
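
A worked example of the two kernel mappings above (addresses illustrative):

/* ka2pa(0xffffffff80100000) == 0x0000000000100000  (kernel code mapping)
 * ka2pa(0xffff800000200000) == 0x0000000000200000  (direct physical mapping)
 * PA2KA(0x200000)           == 0xffff800000200000
 */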
85,12 → 85,12
#define PTL2_INDEX_ARCH(vaddr) (((vaddr)>>21)&0x1ff)
#define PTL3_INDEX_ARCH(vaddr) (((vaddr)>>12)&0x1ff)
 
- #define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl0))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl0))[(i)].addr_32_51)<<32 )))
- #define GET_PTL2_ADDRESS_ARCH(ptl1, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl1))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl1))[(i)].addr_32_51)<<32 )))
- #define GET_PTL3_ADDRESS_ARCH(ptl2, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl2))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl2))[(i)].addr_32_51)<<32 )))
- #define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((__address *) ((((__u64) ((pte_t *)(ptl3))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl3))[(i)].addr_32_51)<<32 )))
+ #define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *) ((((uint64_t) ((pte_t *)(ptl0))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl0))[(i)].addr_32_51)<<32 )))
+ #define GET_PTL2_ADDRESS_ARCH(ptl1, i) ((pte_t *) ((((uint64_t) ((pte_t *)(ptl1))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl1))[(i)].addr_32_51)<<32 )))
+ #define GET_PTL3_ADDRESS_ARCH(ptl2, i) ((pte_t *) ((((uint64_t) ((pte_t *)(ptl2))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl2))[(i)].addr_32_51)<<32 )))
+ #define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((uintptr_t *) ((((uint64_t) ((pte_t *)(ptl3))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl3))[(i)].addr_32_51)<<32 )))
 
- #define SET_PTL0_ADDRESS_ARCH(ptl0) (write_cr3((__address) (ptl0)))
+ #define SET_PTL0_ADDRESS_ARCH(ptl0) (write_cr3((uintptr_t) (ptl0)))
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) set_pt_addr((pte_t *)(ptl0), (index_t)(i), a)
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) set_pt_addr((pte_t *)(ptl1), (index_t)(i), a)
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a) set_pt_addr((pte_t *)(ptl2), (index_t)(i), a)
106,9 → 106,9
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) set_pt_flags((pte_t *)(ptl2), (index_t)(i), (x))
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *)(ptl3), (index_t)(i), (x))
 
- #define PTE_VALID_ARCH(p) (*((__u64 *) (p)) != 0)
+ #define PTE_VALID_ARCH(p) (*((uint64_t *) (p)) != 0)
#define PTE_PRESENT_ARCH(p) ((p)->present != 0)
- #define PTE_GET_FRAME_ARCH(p) ((((__address)(p)->addr_12_31)<<12) | ((__address)(p)->addr_32_51<<32))
+ #define PTE_GET_FRAME_ARCH(p) ((((uintptr_t)(p)->addr_12_31)<<12) | ((uintptr_t)(p)->addr_32_51<<32))
#define PTE_WRITABLE_ARCH(p) ((p)->writeable != 0)
#define PTE_EXECUTABLE_ARCH(p) ((p)->no_execute == 0)
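
The PTE stores the frame number split across two bit fields; the macros above reassemble it. A worked example with illustrative field values:

/* addr_12_31 = 0xabcde, addr_32_51 = 0x1
 * frame = (0xabcde << 12) | ((uint64_t) 0x1 << 32)
 *       = 0x00000000abcde000 | 0x0000000100000000
 *       = 0x00000001abcde000
 */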
 
164,7 → 164,7
);
}
 
- static inline void set_pt_addr(pte_t *pt, index_t i, __address a)
+ static inline void set_pt_addr(pte_t *pt, index_t i, uintptr_t a)
{
pte_t *p = &pt[i];
 
197,6 → 197,5
 
#endif
 
/** @}
*/
 
/kernel/trunk/arch/amd64/include/context.h
50,16 → 50,16
* during function call
*/
struct context {
- __address sp;
- __address pc;
+ uintptr_t sp;
+ uintptr_t pc;
- __u64 rbx;
- __u64 rbp;
+ uint64_t rbx;
+ uint64_t rbp;
 
- __u64 r12;
- __u64 r13;
- __u64 r14;
- __u64 r15;
+ uint64_t r12;
+ uint64_t r13;
+ uint64_t r14;
+ uint64_t r15;
 
ipl_t ipl;
} __attribute__ ((packed));
/kernel/trunk/arch/amd64/include/cpu.h
76,7 → 76,7
};
 
extern void set_efer_flag(int flag);
- extern __u64 read_efer_flag(void);
+ extern uint64_t read_efer_flag(void);
void cpu_setup_fpu(void);
 
#endif /* __ASM__ */
/kernel/trunk/arch/amd64/src/cpu/cpu.c
124,7 → 124,7
void cpu_arch_init(void)
{
CPU->arch.tss = tss_p;
- CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss);
+ CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((uint8_t *) CPU->arch.tss);
CPU->fpu_owner = NULL;
}
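
The pointer subtraction above computes the byte offset of the I/O bitmap within the TSS; with <stddef.h> available it could equivalently be written as:

CPU->arch.tss->iomap_base = offsetof(tss_t, iomap);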
 
/kernel/trunk/arch/amd64/src/syscall.c
57,9 → 57,9
* +0(KDATA_DES), +8(UDATA_DES), +16(UTEXT_DES)
*/
write_msr(AMD_MSR_STAR,
- ((__u64)(gdtselector(KDATA_DES) | PL_USER)<<48) \
- | ((__u64)(gdtselector(KTEXT_DES) | PL_KERNEL)<<32));
- write_msr(AMD_MSR_LSTAR, (__u64)syscall_entry);
+ ((uint64_t)(gdtselector(KDATA_DES) | PL_USER)<<48) \
+ | ((uint64_t)(gdtselector(KTEXT_DES) | PL_KERNEL)<<32));
+ write_msr(AMD_MSR_LSTAR, (uint64_t)syscall_entry);
/* Mask RFLAGS on syscall
* - disable interrupts, until we exchange the stack register
* (mask the IE bit)
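
For orientation, the STAR layout being composed above, per the AMD64 SYSCALL/SYSRET definition:

/* STAR[63:48] - selector base used by SYSRET  (user CS/SS derived from it)
 * STAR[47:32] - selector base used by SYSCALL (kernel CS/SS derived from it)
 * STAR[31:0]  - legacy 32-bit SYSCALL target EIP (unused in long mode)
 */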
/kernel/trunk/arch/amd64/src/amd64.c
186,7 → 186,7
* The specs say, that on %fs:0 there is stored contents of %fs register,
* we need not to go to CPL0 to read it.
*/
- __native sys_tls_set(__native addr)
+ unative_t sys_tls_set(unative_t addr)
{
THREAD->arch.tls = addr;
write_msr(AMD_MSR_FS, addr);
/kernel/trunk/arch/amd64/src/pm.c
123,13 → 123,13
 
idescriptor_t idt[IDT_ITEMS];
 
- ptr_16_64_t gdtr = {.limit = sizeof(gdt), .base= (__u64) gdt };
- ptr_16_64_t idtr = {.limit = sizeof(idt), .base= (__u64) idt };
+ ptr_16_64_t gdtr = {.limit = sizeof(gdt), .base= (uint64_t) gdt };
+ ptr_16_64_t idtr = {.limit = sizeof(idt), .base= (uint64_t) idt };
 
static tss_t tss;
tss_t *tss_p = NULL;
 
- void gdt_tss_setbase(descriptor_t *d, __address base)
+ void gdt_tss_setbase(descriptor_t *d, uintptr_t base)
{
tss_descriptor_t *td = (tss_descriptor_t *) d;
 
139,7 → 139,7
td->base_32_63 = ((base) >> 32);
}
 
- void gdt_tss_setlimit(descriptor_t *d, __u32 limit)
+ void gdt_tss_setlimit(descriptor_t *d, uint32_t limit)
{
struct tss_descriptor *td = (tss_descriptor_t *) d;
 
147,7 → 147,7
td->limit_16_19 = (limit >> 16) & 0xf;
}
 
- void idt_setoffset(idescriptor_t *d, __address offset)
+ void idt_setoffset(idescriptor_t *d, uintptr_t offset)
{
/*
* Offset is a linear address.
159,7 → 159,7
 
void tss_initialize(tss_t *t)
{
- memsetb((__address) t, sizeof(tss_t), 0);
+ memsetb((uintptr_t) t, sizeof(tss_t), 0);
}
 
/*
179,7 → 179,7
d->present = 1;
d->type = AR_INTERRUPT; /* masking interrupt */
 
- idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size);
+ idt_setoffset(d, ((uintptr_t) interrupt_handlers) + i*interrupt_handler_size);
exc_register(i, "undef", (iroutine)null_interrupt);
}
 
214,7 → 214,7
/* We are going to use malloc, which may return
* non boot-mapped pointer, initialize the CR3 register
* ahead of page_init */
- write_cr3((__address) AS_KERNEL->page_table);
+ write_cr3((uintptr_t) AS_KERNEL->page_table);
 
tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC);
if (!tss_p)
228,7 → 228,7
tss_desc->type = AR_TSS;
tss_desc->dpl = PL_KERNEL;
- gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p);
+ gdt_tss_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);
 
gdtr_load(&gdtr);
/kernel/trunk/arch/amd64/src/ddi/ddi.c
55,7 → 55,7
*
* @return 0 on success or an error code from errno.h.
*/
- int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size)
+ int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size)
{
count_t bits;
 
65,13 → 65,13
 
if (task->arch.iomap.bits < bits) {
bitmap_t oldiomap;
- __u8 *newmap;
+ uint8_t *newmap;
/*
* The I/O permission bitmap is too small and needs to be grown.
*/
- newmap = (__u8 *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
+ newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC);
if (!newmap)
return ENOMEM;
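
BITS2BYTES is not shown in this diff; the assumed semantics are a round-up of a bit count to whole bytes, along the lines of:

#define BITS2BYTES(bits) (((bits) + 7) / 8)	/* assumption, not the verified definition */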
/kernel/trunk/arch/amd64/src/proc/scheduler.c
56,12 → 56,12
/** Perform amd64 specific tasks needed before the new thread is scheduled. */
void before_thread_runs_arch(void)
{
- CPU->arch.tss->rsp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
+ CPU->arch.tss->rsp0 = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
 
/* Syscall support - write address of thread stack pointer to
* hidden part of gs */
swapgs();
- write_msr(AMD_MSR_GS, (__u64)&THREAD->kstack);
+ write_msr(AMD_MSR_GS, (uint64_t)&THREAD->kstack);
swapgs();
 
/* TLS support - set FS to thread local storage */
/kernel/trunk/arch/amd64/src/debugger.c
46,7 → 46,7
#include <smp/ipi.h>
 
typedef struct {
- __address address; /**< Breakpoint address */
+ uintptr_t address; /**< Breakpoint address */
int flags; /**< Flags regarding breakpoint */
int counter; /**< How many times the exception occured */
} bpinfo_t;
122,7 → 122,7
/* Setup DR register according to table */
static void setup_dr(int curidx)
{
- __native dr7;
+ unative_t dr7;
bpinfo_t *cur = &breakpoints[curidx];
int flags = breakpoints[curidx].flags;
 
153,14 → 153,14
;
} else {
if (sizeof(int) == 4)
- dr7 |= ((__native) 0x3) << (18 + 4*curidx);
+ dr7 |= ((unative_t) 0x3) << (18 + 4*curidx);
else /* 8 */
- dr7 |= ((__native) 0x2) << (18 + 4*curidx);
+ dr7 |= ((unative_t) 0x2) << (18 + 4*curidx);
if ((flags & BKPOINT_WRITE))
- dr7 |= ((__native) 0x1) << (16 + 4*curidx);
+ dr7 |= ((unative_t) 0x1) << (16 + 4*curidx);
else if ((flags & BKPOINT_READ_WRITE))
- dr7 |= ((__native) 0x3) << (16 + 4*curidx);
+ dr7 |= ((unative_t) 0x3) << (16 + 4*curidx);
}
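
A worked instance of the DR7 encoding above: slot 0, a 4-byte write watchpoint (the sizeof(int) == 4 branch):

/* LEN0 = 0x3 << 18 = 0x000c0000  (4-byte wide watchpoint)
 * R/W0 = 0x1 << 16 = 0x00010000  (break on data writes)
 * dr7 |= 0x000d0000
 */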
 
/* Enable global breakpoint */
205,7 → 205,7
}
cur = &breakpoints[curidx];
 
- cur->address = (__address) where;
+ cur->address = (uintptr_t) where;
cur->flags = flags;
cur->counter = 0;
 
235,13 → 235,13
/* Handle zero checker */
if (! (breakpoints[slot].flags & BKPOINT_INSTR)) {
if ((breakpoints[slot].flags & BKPOINT_CHECK_ZERO)) {
- if (*((__native *) breakpoints[slot].address) != 0)
+ if (*((unative_t *) breakpoints[slot].address) != 0)
return;
printf("**** Found ZERO on address %p ****\n",
slot, breakpoints[slot].address);
} else {
printf("Data watchpoint - new data: %p\n",
- *((__native *) breakpoints[slot].address));
+ *((unative_t *) breakpoints[slot].address));
}
}
printf("Reached breakpoint %d:%p(%s)\n", slot, getip(istate),
315,7 → 315,7
 
static void debug_exception(int n, istate_t *istate)
{
- __native dr6;
+ unative_t dr6;
int i;
/* Set RF to restart the instruction */
/kernel/trunk/arch/amd64/src/mm/memory_init.c
37,9 → 37,9
#include <arch/mm/page.h>
#include <print.h>
 
- __u8 e820counter = 0xff;
+ uint8_t e820counter = 0xff;
struct e820memmap_ e820table[MEMMAP_E820_MAX_RECORDS];
- __u32 e801memorysize;
+ uint32_t e801memorysize;
 
size_t get_memory_size(void)
{
48,7 → 48,7
 
void memory_print_map(void)
{
- __u8 i;
+ uint8_t i;
for (i=0;i<e820counter;i++) {
printf("E820 base: %#llx size: %#llx type: ", e820table[i].base_address, e820table[i].size);
/kernel/trunk/arch/amd64/src/mm/page.c
62,19 → 62,19
#define PTL3_ADDR(ptl2, page) ((pte_t *)PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page))))
 
#define SETUP_PTL1(ptl0, page, tgt) { \
- SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
+ SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
}
#define SETUP_PTL2(ptl1, page, tgt) { \
- SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
+ SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
}
#define SETUP_PTL3(ptl2, page, tgt) { \
- SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
+ SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
}
#define SETUP_FRAME(ptl3, page, tgt) { \
- SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (__address)KA2PA(tgt)); \
+ SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \
SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \
}
 
81,7 → 81,7
 
void page_arch_init(void)
{
- __address cur;
+ uintptr_t cur;
int i;
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL;
 
109,10 → 109,10
}
 
exc_register(14, "page_fault", (iroutine)page_fault);
- write_cr3((__address) AS_KERNEL->page_table);
+ write_cr3((uintptr_t) AS_KERNEL->page_table);
}
else {
- write_cr3((__address) AS_KERNEL->page_table);
+ write_cr3((uintptr_t) AS_KERNEL->page_table);
}
}
 
125,8 → 125,8
*/
void ident_page_fault(int n, istate_t *istate)
{
- __address page;
- static __address oldpage = 0;
+ uintptr_t page;
+ static uintptr_t oldpage = 0;
pte_t *aptl_1, *aptl_2, *aptl_3;
 
page = read_cr2();
173,7 → 173,7
 
void page_fault(int n, istate_t *istate)
{
- __address page;
+ uintptr_t page;
pf_access_t access;
page = read_cr2();
198,12 → 198,12
}
 
 
- __address hw_map(__address physaddr, size_t size)
+ uintptr_t hw_map(uintptr_t physaddr, size_t size)
{
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH))
panic("Unable to map physical memory %p (%d bytes)", physaddr, size)
- __address virtaddr = PA2KA(last_frame);
+ uintptr_t virtaddr = PA2KA(last_frame);
pfn_t i;
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++)
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE);
/kernel/trunk/arch/amd64/src/interrupt.c
56,7 → 56,7
void print_info_errcode(int n, istate_t *istate)
{
char *symbol;
- /* __u64 *x = &istate->stack[0]; */
+ /* uint64_t *x = &istate->stack[0]; */
 
if (!(symbol=get_symtab_entry(istate->rip)))
symbol = "";
79,8 → 79,8
* Interrupt and exception dispatching.
*/
 
- void (* disable_irqs_function)(__u16 irqmask) = NULL;
- void (* enable_irqs_function)(__u16 irqmask) = NULL;
+ void (* disable_irqs_function)(uint16_t irqmask) = NULL;
+ void (* enable_irqs_function)(uint16_t irqmask) = NULL;
void (* eoi_function)(void) = NULL;
 
void null_interrupt(int n, istate_t *istate)
141,7 → 141,7
tlb_shootdown_ipi_recv();
}
 
- void trap_virtual_enable_irqs(__u16 irqmask)
+ void trap_virtual_enable_irqs(uint16_t irqmask)
{
if (enable_irqs_function)
enable_irqs_function(irqmask);
149,7 → 149,7
panic("no enable_irqs_function\n");
}
 
- void trap_virtual_disable_irqs(__u16 irqmask)
+ void trap_virtual_disable_irqs(uint16_t irqmask)
{
if (disable_irqs_function)
disable_irqs_function(irqmask);
174,7 → 174,7
 
 
/* Reregister irq to be IPC-ready */
- void irq_ipc_bind_arch(__native irq)
+ void irq_ipc_bind_arch(unative_t irq)
{
if (irq == IRQ_CLK)
return;