/kernel/trunk/arch/sparc64/include/interrupt.h |
---|
54,7 → 54,7 |
struct istate { |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr) |
{ |
/* TODO */ |
} |
63,7 → 63,7 |
/* TODO */ |
return 0; |
} |
static inline __native istate_get_pc(istate_t *istate) |
static inline unative_t istate_get_pc(istate_t *istate) |
{ |
/* TODO */ |
return 0; |
/kernel/trunk/arch/sparc64/include/byteorder.h |
---|
38,14 → 38,14 |
#include <arch/types.h> |
#include <byteorder.h> |
static inline __u64 __u64_le2host(__u64 n) |
static inline uint64_t uint64_t_le2host(uint64_t n) |
{ |
return __u64_byteorder_swap(n); |
return uint64_t_byteorder_swap(n); |
} |
static inline __native __native_le2host(__native n) |
static inline unative_t unative_t_le2host(unative_t n) |
{ |
return __u64_byteorder_swap(n); |
return uint64_t_byteorder_swap(n); |
} |
#endif |
/kernel/trunk/arch/sparc64/include/types.h |
---|
37,27 → 37,27 |
#define NULL 0 |
typedef signed char __s8; |
typedef signed short __s16; |
typedef signed int __s32; |
typedef signed long __s64; |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed int int32_t; |
typedef signed long int64_t; |
typedef unsigned char __u8; |
typedef unsigned short __u16; |
typedef unsigned int __u32; |
typedef unsigned long __u64; |
typedef unsigned char uint8_t; |
typedef unsigned short uint16_t; |
typedef unsigned int uint32_t; |
typedef unsigned long uint64_t; |
typedef __u64 __address; |
typedef __u64 pfn_t; |
typedef uint64_t uintptr_t; |
typedef uint64_t pfn_t; |
typedef __u64 ipl_t; |
typedef uint64_t ipl_t; |
typedef __u64 __native; |
typedef __s64 __snative; |
typedef uint64_t unative_t; |
typedef int64_t native_t; |
typedef struct pte pte_t; |
typedef __u8 asi_t; |
typedef uint8_t asi_t; |
#endif |
/kernel/trunk/arch/sparc64/include/memstr.h |
---|
37,10 → 37,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(__address dst, size_t cnt, __u16 x); |
extern void memsetb(__address dst, size_t cnt, __u8 x); |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern int memcmp(__address src, __address dst, int cnt); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
#endif |
/kernel/trunk/arch/sparc64/include/atomic.h |
---|
49,8 → 49,8 |
*/ |
static inline long atomic_add(atomic_t *val, int i) |
{ |
__u64 a, b; |
volatile __u64 x = (__u64) &val->count; |
uint64_t a, b; |
volatile uint64_t x = (uint64_t) &val->count; |
__asm__ volatile ( |
"0:\n" |
60,7 → 60,7 |
"cmp %1, %2\n" |
"bne 0b\n" /* The operation failed and must be attempted again if a != b. */ |
"nop\n" |
: "=m" (*((__u64 *)x)), "=r" (a), "=r" (b) |
: "=m" (*((uint64_t *)x)), "=r" (a), "=r" (b) |
: "r" (i) |
); |
/kernel/trunk/arch/sparc64/include/faddr.h |
---|
37,7 → 37,7 |
#include <arch/types.h> |
#define FADDR(fptr) ((__address) (fptr)) |
#define FADDR(fptr) ((uintptr_t) (fptr)) |
#endif |
/kernel/trunk/arch/sparc64/include/asm.h |
---|
44,9 → 44,9 |
* |
* @return Value of PSTATE register. |
*/ |
static inline __u64 pstate_read(void) |
static inline uint64_t pstate_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("rdpr %%pstate, %0\n" : "=r" (v)); |
57,7 → 57,7 |
* |
* @param v New value of PSTATE register. |
*/ |
static inline void pstate_write(__u64 v) |
static inline void pstate_write(uint64_t v) |
{ |
__asm__ volatile ("wrpr %0, %1, %%pstate\n" : : "r" (v), "i" (0)); |
} |
66,9 → 66,9 |
* |
 * @return Value of TICK_compare register. |
*/ |
static inline __u64 tick_compare_read(void) |
static inline uint64_t tick_compare_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("rd %%tick_cmpr, %0\n" : "=r" (v)); |
79,7 → 79,7 |
* |
 * @param v New value of TICK_compare register. |
*/ |
static inline void tick_compare_write(__u64 v) |
static inline void tick_compare_write(uint64_t v) |
{ |
__asm__ volatile ("wr %0, %1, %%tick_cmpr\n" : : "r" (v), "i" (0)); |
} |
88,9 → 88,9 |
* |
* @return Value of TICK register. |
*/ |
static inline __u64 tick_read(void) |
static inline uint64_t tick_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("rdpr %%tick, %0\n" : "=r" (v)); |
101,7 → 101,7 |
* |
* @param v New value of TICK register. |
*/ |
static inline void tick_write(__u64 v) |
static inline void tick_write(uint64_t v) |
{ |
__asm__ volatile ("wrpr %0, %1, %%tick\n" : : "r" (v), "i" (0)); |
} |
110,9 → 110,9 |
* |
* @return Value of SOFTINT register. |
*/ |
static inline __u64 softint_read(void) |
static inline uint64_t softint_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("rd %%softint, %0\n" : "=r" (v)); |
123,7 → 123,7 |
* |
* @param v New value of SOFTINT register. |
*/ |
static inline void softint_write(__u64 v) |
static inline void softint_write(uint64_t v) |
{ |
__asm__ volatile ("wr %0, %1, %%softint\n" : : "r" (v), "i" (0)); |
} |
134,7 → 134,7 |
* |
* @param v New value of CLEAR_SOFTINT register. |
*/ |
static inline void clear_softint_write(__u64 v) |
static inline void clear_softint_write(uint64_t v) |
{ |
__asm__ volatile ("wr %0, %1, %%clear_softint\n" : : "r" (v), "i" (0)); |
} |
148,7 → 148,7 |
*/ |
static inline ipl_t interrupts_enable(void) { |
pstate_reg_t pstate; |
__u64 value; |
uint64_t value; |
value = pstate_read(); |
pstate.value = value; |
167,7 → 167,7 |
*/ |
static inline ipl_t interrupts_disable(void) { |
pstate_reg_t pstate; |
__u64 value; |
uint64_t value; |
value = pstate_read(); |
pstate.value = value; |
207,9 → 207,9 |
* The stack is assumed to be STACK_SIZE bytes long. |
* The stack must start on page boundary. |
*/ |
static inline __address get_stack_base(void) |
static inline uintptr_t get_stack_base(void) |
{ |
__address v; |
uintptr_t v; |
__asm__ volatile ("and %%sp, %1, %0\n" : "=r" (v) : "r" (~(STACK_SIZE-1))); |
220,9 → 220,9 |
* |
* @return Value of VER register. |
*/ |
static inline __u64 ver_read(void) |
static inline uint64_t ver_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("rdpr %%ver, %0\n" : "=r" (v)); |
233,9 → 233,9 |
* |
* @return Current value in TBA. |
*/ |
static inline __u64 tba_read(void) |
static inline uint64_t tba_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("rdpr %%tba, %0\n" : "=r" (v)); |
246,9 → 246,9 |
* |
* @return Current value in TPC. |
*/ |
static inline __u64 tpc_read(void) |
static inline uint64_t tpc_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("rdpr %%tpc, %0\n" : "=r" (v)); |
259,9 → 259,9 |
* |
* @return Current value in TL. |
*/ |
static inline __u64 tl_read(void) |
static inline uint64_t tl_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("rdpr %%tl, %0\n" : "=r" (v)); |
272,12 → 272,12 |
* |
* @param v New value of TBA. |
*/ |
static inline void tba_write(__u64 v) |
static inline void tba_write(uint64_t v) |
{ |
__asm__ volatile ("wrpr %0, %1, %%tba\n" : : "r" (v), "i" (0)); |
} |
/** Load __u64 from alternate space. |
/** Load uint64_t from alternate space. |
* |
* @param asi ASI determining the alternate space. |
* @param va Virtual address within the ASI. |
284,9 → 284,9 |
* |
* @return Value read from the virtual address in the specified address space. |
*/ |
static inline __u64 asi_u64_read(asi_t asi, __address va) |
static inline uint64_t asi_u64_read(asi_t asi, uintptr_t va) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("ldxa [%1] %2, %0\n" : "=r" (v) : "r" (va), "i" (asi)); |
293,13 → 293,13 |
return v; |
} |
/** Store __u64 to alternate space. |
/** Store uint64_t to alternate space. |
* |
* @param asi ASI determining the alternate space. |
* @param va Virtual address within the ASI. |
* @param v Value to be written. |
*/ |
static inline void asi_u64_write(asi_t asi, __address va, __u64 v) |
static inline void asi_u64_write(asi_t asi, uintptr_t va, uint64_t v) |
{ |
__asm__ volatile ("stxa %0, [%1] %2\n" : : "r" (v), "r" (va), "i" (asi) : "memory"); |
} |
308,7 → 308,7 |
void cpu_halt(void); |
void cpu_sleep(void); |
void asm_delay_loop(__u32 t); |
void asm_delay_loop(uint32_t t); |
#endif |
/kernel/trunk/arch/sparc64/include/trap/trap_table.h |
---|
47,7 → 47,7 |
#ifndef __ASM__ |
struct trap_table_entry { |
__u8 octets[TRAP_TABLE_ENTRY_SIZE]; |
uint8_t octets[TRAP_TABLE_ENTRY_SIZE]; |
} __attribute__ ((packed)); |
typedef struct trap_table_entry trap_table_entry_t; |
/kernel/trunk/arch/sparc64/include/trap/trap.h |
---|
42,7 → 42,7 |
static inline void trap_switch_trap_table(void) |
{ |
/* Point TBA to kernel copy of OFW's trap table. */ |
tba_write((__u64) trap_table); |
tba_write((uint64_t) trap_table); |
} |
extern void trap_init(void); |
/kernel/trunk/arch/sparc64/include/mm/frame.h |
---|
44,10 → 44,10 |
#include <arch/types.h> |
union frame_address { |
__address address; |
uintptr_t address; |
struct { |
unsigned : 23; |
__u64 pfn : 28; /**< Physical Frame Number. */ |
uint64_t pfn : 28; /**< Physical Frame Number. */ |
unsigned offset : 13; /**< Offset. */ |
} __attribute__ ((packed)); |
}; |
/kernel/trunk/arch/sparc64/include/mm/page.h |
---|
46,13 → 46,13 |
#include <arch/types.h> |
#include <genarch/mm/page_ht.h> |
#define KA2PA(x) ((__address) (x)) |
#define PA2KA(x) ((__address) (x)) |
#define KA2PA(x) ((uintptr_t) (x)) |
#define PA2KA(x) ((uintptr_t) (x)) |
union page_address { |
__address address; |
uintptr_t address; |
struct { |
__u64 vpn : 51; /**< Virtual Page Number. */ |
uint64_t vpn : 51; /**< Virtual Page Number. */ |
unsigned offset : 13; /**< Offset. */ |
} __attribute__ ((packed)); |
}; |
/kernel/trunk/arch/sparc64/include/mm/tte.h |
---|
39,13 → 39,13 |
/** Translation Table Entry - Tag. */ |
union tte_tag { |
__u64 value; |
uint64_t value; |
struct { |
unsigned g : 1; /**< Global. */ |
unsigned : 2; /**< Reserved. */ |
unsigned context : 13; /**< Context identifier. */ |
unsigned : 6; /**< Reserved. */ |
__u64 va_tag : 42; /**< Virtual Address Tag, bits 63:22. */ |
uint64_t va_tag : 42; /**< Virtual Address Tag, bits 63:22. */ |
} __attribute__ ((packed)); |
}; |
53,7 → 53,7 |
/** Translation Table Entry - Data. */ |
union tte_data { |
__u64 value; |
uint64_t value; |
struct { |
unsigned v : 1; /**< Valid. */ |
unsigned size : 2; /**< Page size of this entry. */ |
/kernel/trunk/arch/sparc64/include/mm/mmu.h |
---|
82,7 → 82,7 |
/** LSU Control Register. */ |
union lsu_cr_reg { |
__u64 value; |
uint64_t value; |
struct { |
unsigned : 23; |
unsigned pm : 8; |
/kernel/trunk/arch/sparc64/include/mm/asid.h |
---|
40,7 → 40,7 |
/* |
 * On SPARC, Context means the same thing as ASID throughout the kernel. |
*/ |
typedef __u16 asid_t; |
typedef uint16_t asid_t; |
#define ASID_MAX_ARCH 8191 /* 2^13 - 1 */ |
/kernel/trunk/arch/sparc64/include/mm/tlb.h |
---|
56,7 → 56,7 |
#define KERNEL_PAGE_WIDTH 22 /* 4M */ |
union tlb_context_reg { |
__u64 v; |
uint64_t v; |
struct { |
unsigned long : 51; |
unsigned context : 13; /**< Context/ASID. */ |
69,9 → 69,9 |
/** I-/D-TLB Data Access Address in Alternate Space. */ |
union tlb_data_access_addr { |
__u64 value; |
uint64_t value; |
struct { |
__u64 : 55; |
uint64_t : 55; |
unsigned tlb_entry : 6; |
unsigned : 3; |
} __attribute__ ((packed)); |
81,9 → 81,9 |
/** I-/D-TLB Tag Read Register. */ |
union tlb_tag_read_reg { |
__u64 value; |
uint64_t value; |
struct { |
__u64 vpn : 51; /**< Virtual Address bits 63:13. */ |
uint64_t vpn : 51; /**< Virtual Address bits 63:13. */ |
unsigned context : 13; /**< Context identifier. */ |
} __attribute__ ((packed)); |
}; |
101,9 → 101,9 |
/** TLB Demap Operation Address. */ |
union tlb_demap_addr { |
__u64 value; |
uint64_t value; |
struct { |
__u64 vpn: 51; /**< Virtual Address bits 63:13. */ |
uint64_t vpn: 51; /**< Virtual Address bits 63:13. */ |
unsigned : 6; /**< Ignored. */ |
unsigned type : 1; /**< The type of demap operation. */ |
unsigned context : 2; /**< Context register selection. */ |
114,7 → 114,7 |
/** TLB Synchronous Fault Status Register. */ |
union tlb_sfsr_reg { |
__u64 value; |
uint64_t value; |
struct { |
unsigned long : 39; /**< Implementation dependent. */ |
unsigned nf : 1; /**< Nonfaulting load. */ |
136,7 → 136,7 |
* |
* @return Current value of Primary Context Register. |
*/ |
static inline __u64 mmu_primary_context_read(void) |
static inline uint64_t mmu_primary_context_read(void) |
{ |
return asi_u64_read(ASI_DMMU, VA_PRIMARY_CONTEXT_REG); |
} |
145,7 → 145,7 |
* |
* @param v New value of Primary Context Register. |
*/ |
static inline void mmu_primary_context_write(__u64 v) |
static inline void mmu_primary_context_write(uint64_t v) |
{ |
asi_u64_write(ASI_DMMU, VA_PRIMARY_CONTEXT_REG, v); |
flush(); |
155,7 → 155,7 |
* |
* @return Current value of Secondary Context Register. |
*/ |
static inline __u64 mmu_secondary_context_read(void) |
static inline uint64_t mmu_secondary_context_read(void) |
{ |
return asi_u64_read(ASI_DMMU, VA_SECONDARY_CONTEXT_REG); |
} |
164,7 → 164,7 |
* |
* @param v New value of Primary Context Register. |
*/ |
static inline void mmu_secondary_context_write(__u64 v) |
static inline void mmu_secondary_context_write(uint64_t v) |
{ |
asi_u64_write(ASI_DMMU, VA_PRIMARY_CONTEXT_REG, v); |
flush(); |
176,7 → 176,7 |
* |
* @return Current value of specified IMMU TLB Data Access Register. |
*/ |
static inline __u64 itlb_data_access_read(index_t entry) |
static inline uint64_t itlb_data_access_read(index_t entry) |
{ |
tlb_data_access_addr_t reg; |
190,7 → 190,7 |
* @param entry TLB Entry index. |
* @param value Value to be written. |
*/ |
static inline void itlb_data_access_write(index_t entry, __u64 value) |
static inline void itlb_data_access_write(index_t entry, uint64_t value) |
{ |
tlb_data_access_addr_t reg; |
206,7 → 206,7 |
* |
* @return Current value of specified DMMU TLB Data Access Register. |
*/ |
static inline __u64 dtlb_data_access_read(index_t entry) |
static inline uint64_t dtlb_data_access_read(index_t entry) |
{ |
tlb_data_access_addr_t reg; |
220,7 → 220,7 |
* @param entry TLB Entry index. |
* @param value Value to be written. |
*/ |
static inline void dtlb_data_access_write(index_t entry, __u64 value) |
static inline void dtlb_data_access_write(index_t entry, uint64_t value) |
{ |
tlb_data_access_addr_t reg; |
236,7 → 236,7 |
* |
* @return Current value of specified IMMU TLB Tag Read Register. |
*/ |
static inline __u64 itlb_tag_read_read(index_t entry) |
static inline uint64_t itlb_tag_read_read(index_t entry) |
{ |
tlb_tag_read_addr_t tag; |
251,7 → 251,7 |
* |
* @return Current value of specified DMMU TLB Tag Read Register. |
*/ |
static inline __u64 dtlb_tag_read_read(index_t entry) |
static inline uint64_t dtlb_tag_read_read(index_t entry) |
{ |
tlb_tag_read_addr_t tag; |
264,7 → 264,7 |
* |
* @param v Value to be written. |
*/ |
static inline void itlb_tag_access_write(__u64 v) |
static inline void itlb_tag_access_write(uint64_t v) |
{ |
asi_u64_write(ASI_IMMU, VA_IMMU_TAG_ACCESS, v); |
flush(); |
274,7 → 274,7 |
* |
* @return Current value of IMMU TLB Tag Access Register. |
*/ |
static inline __u64 itlb_tag_access_read(void) |
static inline uint64_t itlb_tag_access_read(void) |
{ |
return asi_u64_read(ASI_IMMU, VA_IMMU_TAG_ACCESS); |
} |
283,7 → 283,7 |
* |
* @param v Value to be written. |
*/ |
static inline void dtlb_tag_access_write(__u64 v) |
static inline void dtlb_tag_access_write(uint64_t v) |
{ |
asi_u64_write(ASI_DMMU, VA_DMMU_TAG_ACCESS, v); |
flush(); |
293,7 → 293,7 |
* |
* @return Current value of DMMU TLB Tag Access Register. |
*/ |
static inline __u64 dtlb_tag_access_read(void) |
static inline uint64_t dtlb_tag_access_read(void) |
{ |
return asi_u64_read(ASI_DMMU, VA_DMMU_TAG_ACCESS); |
} |
303,7 → 303,7 |
* |
* @param v Value to be written. |
*/ |
static inline void itlb_data_in_write(__u64 v) |
static inline void itlb_data_in_write(uint64_t v) |
{ |
asi_u64_write(ASI_ITLB_DATA_IN_REG, 0, v); |
flush(); |
313,7 → 313,7 |
* |
* @param v Value to be written. |
*/ |
static inline void dtlb_data_in_write(__u64 v) |
static inline void dtlb_data_in_write(uint64_t v) |
{ |
asi_u64_write(ASI_DTLB_DATA_IN_REG, 0, v); |
flush(); |
323,7 → 323,7 |
* |
* @return Current content of I-SFSR register. |
*/ |
static inline __u64 itlb_sfsr_read(void) |
static inline uint64_t itlb_sfsr_read(void) |
{ |
return asi_u64_read(ASI_IMMU, VA_IMMU_SFSR); |
} |
332,7 → 332,7 |
* |
* @param v New value of I-SFSR register. |
*/ |
static inline void itlb_sfsr_write(__u64 v) |
static inline void itlb_sfsr_write(uint64_t v) |
{ |
asi_u64_write(ASI_IMMU, VA_IMMU_SFSR, v); |
flush(); |
342,7 → 342,7 |
* |
* @return Current content of D-SFSR register. |
*/ |
static inline __u64 dtlb_sfsr_read(void) |
static inline uint64_t dtlb_sfsr_read(void) |
{ |
return asi_u64_read(ASI_DMMU, VA_DMMU_SFSR); |
} |
351,7 → 351,7 |
* |
* @param v New value of D-SFSR register. |
*/ |
static inline void dtlb_sfsr_write(__u64 v) |
static inline void dtlb_sfsr_write(uint64_t v) |
{ |
asi_u64_write(ASI_DMMU, VA_DMMU_SFSR, v); |
flush(); |
361,7 → 361,7 |
* |
* @return Current content of D-SFAR register. |
*/ |
static inline __u64 dtlb_sfar_read(void) |
static inline uint64_t dtlb_sfar_read(void) |
{ |
return asi_u64_read(ASI_DMMU, VA_DMMU_SFAR); |
} |
372,7 → 372,7 |
* @param context_encoding Specifies which Context register has Context ID for demap. |
* @param page Address which is on the page to be demapped. |
*/ |
static inline void itlb_demap(int type, int context_encoding, __address page) |
static inline void itlb_demap(int type, int context_encoding, uintptr_t page) |
{ |
tlb_demap_addr_t da; |
page_address_t pg; |
394,7 → 394,7 |
* @param context_encoding Specifies which Context register has Context ID for demap. |
* @param page Address which is on the page to be demapped. |
*/ |
static inline void dtlb_demap(int type, int context_encoding, __address page) |
static inline void dtlb_demap(int type, int context_encoding, uintptr_t page) |
{ |
tlb_demap_addr_t da; |
page_address_t pg; |
414,7 → 414,7 |
extern void fast_data_access_mmu_miss(void); |
extern void fast_data_access_protection(void); |
extern void dtlb_insert_mapping(__address page, __address frame, int pagesize, bool locked, bool cacheable); |
extern void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable); |
#endif |
/kernel/trunk/arch/sparc64/include/context.h |
---|
54,8 → 54,8 |
#endif |
#define context_set(c, _pc, stack, size) \ |
(c)->pc = ((__address) _pc) - 8; \ |
(c)->sp = ((__address) stack) + ALIGN_UP((size), STACK_ALIGNMENT) - (STACK_BIAS + SP_DELTA); \ |
(c)->pc = ((uintptr_t) _pc) - 8; \ |
(c)->sp = ((uintptr_t) stack) + ALIGN_UP((size), STACK_ALIGNMENT) - (STACK_BIAS + SP_DELTA); \ |
(c)->fp = -STACK_BIAS; \ |
(c)->cleanwin = 0 |
65,26 → 65,26 |
* function calls. |
*/ |
struct context { |
__address sp; /* %o6 */ |
__address pc; /* %o7 */ |
__u64 i0; |
__u64 i1; |
__u64 i2; |
__u64 i3; |
__u64 i4; |
__u64 i5; |
__address fp; /* %i6 */ |
__address i7; |
__u64 l0; |
__u64 l1; |
__u64 l2; |
__u64 l3; |
__u64 l4; |
__u64 l5; |
__u64 l6; |
__u64 l7; |
uintptr_t sp; /* %o6 */ |
uintptr_t pc; /* %o7 */ |
uint64_t i0; |
uint64_t i1; |
uint64_t i2; |
uint64_t i3; |
uint64_t i4; |
uint64_t i5; |
uintptr_t fp; /* %i6 */ |
uintptr_t i7; |
uint64_t l0; |
uint64_t l1; |
uint64_t l2; |
uint64_t l3; |
uint64_t l4; |
uint64_t l5; |
uint64_t l6; |
uint64_t l7; |
ipl_t ipl; |
__u64 cleanwin; |
uint64_t cleanwin; |
}; |
#endif |
/kernel/trunk/arch/sparc64/include/register.h |
---|
39,13 → 39,13 |
/** Version Register. */ |
union ver_reg { |
__u64 value; |
uint64_t value; |
struct { |
__u16 manuf; /**< Manufacturer code. */ |
__u16 impl; /**< Implementation code. */ |
__u8 mask; /**< Mask set revision. */ |
uint16_t manuf; /**< Manufacturer code. */ |
uint16_t impl; /**< Implementation code. */ |
uint8_t mask; /**< Mask set revision. */ |
unsigned : 8; |
__u8 maxtl; |
uint8_t maxtl; |
unsigned : 3; |
unsigned maxwin : 5; |
} __attribute__ ((packed)); |
54,9 → 54,9 |
/** Processor State Register. */ |
union pstate_reg { |
__u64 value; |
uint64_t value; |
struct { |
__u64 : 52; |
uint64_t : 52; |
unsigned ig : 1; /**< Interrupt Globals. */ |
unsigned mg : 1; /**< MMU Globals. */ |
unsigned cle : 1; /**< Current Little Endian. */ |
74,10 → 74,10 |
/** TICK Register. */ |
union tick_reg { |
__u64 value; |
uint64_t value; |
struct { |
unsigned npt : 1; /**< Non-privileged Trap enable. */ |
__u64 counter : 63; /**< Elapsed CPU clock cycle counter. */ |
uint64_t counter : 63; /**< Elapsed CPU clck cycle counter. */ |
} __attribute__ ((packed)); |
}; |
typedef union tick_reg tick_reg_t; |
84,10 → 84,10 |
/** TICK_compare Register. */ |
union tick_compare_reg { |
__u64 value; |
uint64_t value; |
struct { |
unsigned int_dis : 1; /**< TICK_INT interrupt disabled flag. */ |
__u64 tick_cmpr : 63; /**< Compare value for TICK interrupts. */ |
uint64_t tick_cmpr : 63; /**< Compare value for TICK interrupts. */ |
} __attribute__ ((packed)); |
}; |
typedef union tick_compare_reg tick_compare_reg_t; |
94,9 → 94,9 |
/** SOFTINT Register. */ |
union softint_reg { |
__u64 value; |
uint64_t value; |
struct { |
__u64 : 47; |
uint64_t : 47; |
unsigned stick_int : 1; |
unsigned int_level : 15; |
unsigned tick_int : 1; |
/kernel/trunk/arch/sparc64/include/drivers/i8042.h |
---|
45,24 → 45,24 |
#define LAST_REG DATA_REG |
extern volatile __u8 *kbd_virt_address; |
extern volatile uint8_t *kbd_virt_address; |
static inline void i8042_data_write(__u8 data) |
static inline void i8042_data_write(uint8_t data) |
{ |
kbd_virt_address[DATA_REG] = data; |
} |
static inline __u8 i8042_data_read(void) |
static inline uint8_t i8042_data_read(void) |
{ |
return kbd_virt_address[DATA_REG]; |
} |
static inline __u8 i8042_status_read(void) |
static inline uint8_t i8042_status_read(void) |
{ |
return kbd_virt_address[STATUS_REG]; |
} |
static inline void i8042_command_write(__u8 command) |
static inline void i8042_command_write(uint8_t command) |
{ |
kbd_virt_address[COMMAND_REG] = command; |
} |
/kernel/trunk/arch/sparc64/src/ddi/ddi.c |
---|
47,7 → 47,7 |
* |
* @return 0 on success or an error code from errno.h. |
*/ |
int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size) |
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) |
{ |
return 0; |
} |
/kernel/trunk/arch/sparc64/src/proc/scheduler.c |
---|
48,18 → 48,18 |
/** Ensure that thread's kernel stack is locked in TLB. */ |
void before_thread_runs_arch(void) |
{ |
__address base; |
uintptr_t base; |
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH); |
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) { |
if ((uintptr_t) THREAD->kstack < base || (uintptr_t) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) { |
/* |
* Kernel stack of this thread is not locked in DTLB. |
* First, make sure it is not mapped already. |
* If not, create a locked mapping for it. |
*/ |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (__address) THREAD->kstack); |
dtlb_insert_mapping((__address) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true); |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack); |
dtlb_insert_mapping((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), PAGESIZE_8K, true, true); |
} |
} |
66,16 → 66,16 |
/** Unlock thread's stack from TLB, if necessary. */ |
void after_thread_ran_arch(void) |
{ |
__address base; |
uintptr_t base; |
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH); |
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) { |
if ((uintptr_t) THREAD->kstack < base || (uintptr_t) THREAD->kstack > base + (1<<KERNEL_PAGE_WIDTH)) { |
/* |
* Kernel stack of this thread is locked in DTLB. |
* Destroy the mapping. |
*/ |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (__address) THREAD->kstack); |
dtlb_demap(TLB_DEMAP_PAGE, TLB_DEMAP_NUCLEUS, (uintptr_t) THREAD->kstack); |
} |
} |
/kernel/trunk/arch/sparc64/src/trap/interrupt.c |
---|
52,7 → 52,7 |
} |
/* Reregister irq to be IPC-ready */ |
void irq_ipc_bind_arch(__native irq) |
void irq_ipc_bind_arch(unative_t irq) |
{ |
panic("not implemented\n"); |
/* TODO */ |
/kernel/trunk/arch/sparc64/src/mm/tlb.c |
---|
131,7 → 131,7 |
* @param locked True for permanent mappings, false otherwise. |
* @param cacheable True if the mapping is cacheable, false otherwise. |
*/ |
void dtlb_insert_mapping(__address page, __address frame, int pagesize, bool locked, bool cacheable) |
void dtlb_insert_mapping(uintptr_t page, uintptr_t frame, int pagesize, bool locked, bool cacheable) |
{ |
tlb_tag_access_reg_t tag; |
tlb_data_t data; |
170,7 → 170,7 |
void fast_data_access_mmu_miss(void) |
{ |
tlb_tag_access_reg_t tag; |
__address tpc; |
uintptr_t tpc; |
char *tpc_str; |
tag.value = dtlb_tag_access_read(); |
268,7 → 268,7 |
* @param page First page which to sweep out from ITLB and DTLB. |
* @param cnt Number of ITLB and DTLB entries to invalidate. |
*/ |
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt) |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
int i; |
/kernel/trunk/arch/sparc64/src/mm/page.c |
---|
44,7 → 44,7 |
page_mapping_operations = &ht_mapping_operations; |
} |
__address hw_map(__address physaddr, size_t size) |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |
unsigned int order; |
int i; |
73,7 → 73,7 |
else |
order = (fnzb32(size - 1) + 1) - FRAME_WIDTH; |
__address virtaddr = (__address) frame_alloc(order, FRAME_KA); |
uintptr_t virtaddr = (uintptr_t) frame_alloc(order, FRAME_KA); |
for (i = 0; i < sizemap[order].count; i++) |
dtlb_insert_mapping(virtaddr + i*sizemap[order].increment, |
/kernel/trunk/arch/sparc64/src/drivers/i8042.c |
---|
37,11 → 37,11 |
#include <arch/types.h> |
#include <arch/mm/page.h> |
volatile __u8 *kbd_virt_address = NULL; |
volatile uint8_t *kbd_virt_address = NULL; |
void kbd_init() |
{ |
kbd_virt_address = (__u8 *) hw_map(KBD_PHYS_ADDRESS, LAST_REG); |
kbd_virt_address = (uint8_t *) hw_map(KBD_PHYS_ADDRESS, LAST_REG); |
i8042_init(); |
} |
/kernel/trunk/arch/ia64/include/interrupt.h |
---|
88,38 → 88,38 |
__r128 f30; |
__r128 f31; |
__address ar_bsp; |
__address ar_bspstore; |
__address ar_bspstore_new; |
__u64 ar_rnat; |
__u64 ar_ifs; |
__u64 ar_pfs; |
__u64 ar_rsc; |
__address cr_ifa; |
uintptr_t ar_bsp; |
uintptr_t ar_bspstore; |
uintptr_t ar_bspstore_new; |
uint64_t ar_rnat; |
uint64_t ar_ifs; |
uint64_t ar_pfs; |
uint64_t ar_rsc; |
uintptr_t cr_ifa; |
cr_isr_t cr_isr; |
__address cr_iipa; |
uintptr_t cr_iipa; |
psr_t cr_ipsr; |
__address cr_iip; |
__u64 pr; |
__address sp; |
uintptr_t cr_iip; |
uint64_t pr; |
uintptr_t sp; |
/* |
* The following variables are defined only for break_instruction handler. |
*/ |
__u64 in0; |
__u64 in1; |
__u64 in2; |
__u64 in3; |
__u64 in4; |
uint64_t in0; |
uint64_t in1; |
uint64_t in2; |
uint64_t in3; |
uint64_t in4; |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr) |
{ |
istate->cr_iip = retaddr; |
istate->cr_ipsr.ri = 0; /* return to instruction slot #0 */ |
} |
static inline __native istate_get_pc(istate_t *istate) |
static inline unative_t istate_get_pc(istate_t *istate) |
{ |
return istate->cr_iip; |
} |
131,13 → 131,13 |
extern void *ivt; |
extern void general_exception(__u64 vector, istate_t *istate); |
extern int break_instruction(__u64 vector, istate_t *istate); |
extern void universal_handler(__u64 vector, istate_t *istate); |
extern void nop_handler(__u64 vector, istate_t *istate); |
extern void external_interrupt(__u64 vector, istate_t *istate); |
extern void virtual_interrupt(__u64 irq, void *param); |
extern void disabled_fp_register(__u64 vector, istate_t *istate); |
extern void general_exception(uint64_t vector, istate_t *istate); |
extern int break_instruction(uint64_t vector, istate_t *istate); |
extern void universal_handler(uint64_t vector, istate_t *istate); |
extern void nop_handler(uint64_t vector, istate_t *istate); |
extern void external_interrupt(uint64_t vector, istate_t *istate); |
extern void virtual_interrupt(uint64_t irq, void *param); |
extern void disabled_fp_register(uint64_t vector, istate_t *istate); |
/kernel/trunk/arch/ia64/include/byteorder.h |
---|
36,8 → 36,8 |
#define __ia64_BYTEORDER_H__ |
/* IA-64 is little-endian */ |
#define __native_le2host(n) (n) |
#define __u64_le2host(n) (n) |
#define unative_t_le2host(n) (n) |
#define uint64_t_le2host(n) (n) |
#endif |
/kernel/trunk/arch/ia64/include/types.h |
---|
26,7 → 26,7 |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup ia64 |
/** @addtogroup ia64 |
* @{ |
*/ |
/** @file |
37,36 → 37,37 |
#define NULL 0 |
typedef signed char __s8; |
typedef signed short int __s16; |
typedef signed int __s32; |
typedef signed long __s64; |
typedef signed char int8_t; |
typedef signed short int int16_t; |
typedef signed int int32_t; |
typedef signed long int64_t; |
typedef unsigned char __u8; |
typedef unsigned short __u16; |
typedef unsigned int __u32; |
typedef unsigned long __u64; |
typedef unsigned char uint8_t; |
typedef unsigned short uint16_t; |
typedef unsigned int uint32_t; |
typedef unsigned long uint64_t; |
typedef unsigned char __r8; /*Reserve byte*/ |
typedef unsigned char __r8; /* Reserve byte */ |
typedef unsigned short __r16; |
typedef unsigned int __r32; |
typedef unsigned long __r64; |
typedef struct __r128{__r64 lo;__r64 hi;} __r128; |
typedef struct __r128 { |
__r64 lo; |
__r64 hi; |
} __r128; |
typedef uint64_t uintptr_t; |
typedef uint64_t pfn_t; |
typedef __u64 __address; |
typedef __u64 pfn_t; |
typedef uint64_t ipl_t; |
typedef __u64 ipl_t; |
typedef uint64_t unative_t; |
typedef int64_t native_t; |
typedef __u64 __native; |
typedef __s64 __snative; |
typedef struct pte pte_t; |
#endif |
/** @} |
/** @} |
*/ |
/kernel/trunk/arch/ia64/include/memstr.h |
---|
37,10 → 37,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(__address dst, size_t cnt, __u16 x); |
extern void memsetb(__address dst, size_t cnt, __u8 x); |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern int memcmp(__address src, __address dst, int cnt); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
#endif |
/kernel/trunk/arch/ia64/include/faddr.h |
---|
26,14 → 26,14 |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup ia64 |
/** @addtogroup ia64 |
* @{ |
*/ |
/** @file |
*/ |
#ifndef __ia64_FADDR_H__ |
#define __ia64_FADDR_H__ |
#ifndef KERN_ia64_FADDR_H_ |
#define KERN_ia64_FADDR_H_ |
#include <arch/types.h> |
45,10 → 45,9 |
* @param f Function pointer. |
* |
*/ |
#define FADDR(f) (*((__address *)(f))); |
#define FADDR(f) (*((uintptr_t *)(f))); |
#endif |
/** @} |
/** @} |
*/ |
/kernel/trunk/arch/ia64/include/asm.h |
---|
45,9 → 45,9 |
* The stack is assumed to be STACK_SIZE long. |
* The stack must start on page boundary. |
*/ |
static inline __address get_stack_base(void) |
static inline uintptr_t get_stack_base(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1))); |
58,9 → 58,9 |
* |
* @return PSR. |
*/ |
static inline __u64 psr_read(void) |
static inline uint64_t psr_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("mov %0 = psr\n" : "=r" (v)); |
71,9 → 71,9 |
* |
* @return Return location of interruption vector table. |
*/ |
static inline __u64 iva_read(void) |
static inline uint64_t iva_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("mov %0 = cr.iva\n" : "=r" (v)); |
84,7 → 84,7 |
* |
* @param v New location of interruption vector table. |
*/ |
static inline void iva_write(__u64 v) |
static inline void iva_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.iva = %0\n" : : "r" (v)); |
} |
94,9 → 94,9 |
* |
* @return Highest priority, pending, unmasked external interrupt vector. |
*/ |
static inline __u64 ivr_read(void) |
static inline uint64_t ivr_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("mov %0 = cr.ivr\n" : "=r" (v)); |
107,7 → 107,7 |
* |
* @param v New counter value. |
*/ |
static inline void itc_write(__u64 v) |
static inline void itc_write(uint64_t v) |
{ |
__asm__ volatile ("mov ar.itc = %0\n" : : "r" (v)); |
} |
116,9 → 116,9 |
* |
* @return Current counter value. |
*/ |
static inline __u64 itc_read(void) |
static inline uint64_t itc_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("mov %0 = ar.itc\n" : "=r" (v)); |
129,7 → 129,7 |
* |
* @param v New match value. |
*/ |
static inline void itm_write(__u64 v) |
static inline void itm_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.itm = %0\n" : : "r" (v)); |
} |
138,9 → 138,9 |
* |
* @return Match value. |
*/ |
static inline __u64 itm_read(void) |
static inline uint64_t itm_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("mov %0 = cr.itm\n" : "=r" (v)); |
151,9 → 151,9 |
* |
* @return Current vector and mask bit. |
*/ |
static inline __u64 itv_read(void) |
static inline uint64_t itv_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("mov %0 = cr.itv\n" : "=r" (v)); |
164,7 → 164,7 |
* |
* @param v New vector and mask bit. |
*/ |
static inline void itv_write(__u64 v) |
static inline void itv_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.itv = %0\n" : : "r" (v)); |
} |
173,7 → 173,7 |
* |
* @param v This value is ignored. |
*/ |
static inline void eoi_write(__u64 v) |
static inline void eoi_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.eoi = %0\n" : : "r" (v)); |
} |
182,9 → 182,9 |
* |
* @return Current value of TPR. |
*/ |
static inline __u64 tpr_read(void) |
static inline uint64_t tpr_read(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("mov %0 = cr.tpr\n" : "=r" (v)); |
195,7 → 195,7 |
* |
* @param v New value of TPR. |
*/ |
static inline void tpr_write(__u64 v) |
static inline void tpr_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.tpr = %0\n" : : "r" (v)); |
} |
209,7 → 209,7 |
*/ |
static ipl_t interrupts_disable(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ( |
"mov %0 = psr\n" |
230,7 → 230,7 |
*/ |
static ipl_t interrupts_enable(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ( |
"mov %0 = psr\n" |
275,9 → 275,9 |
extern void cpu_halt(void); |
extern void cpu_sleep(void); |
extern void asm_delay_loop(__u32 t); |
extern void asm_delay_loop(uint32_t t); |
extern void switch_to_userspace(__address entry, __address sp, __address bsp, __address uspace_uarg, __u64 ipsr, __u64 rsc); |
extern void switch_to_userspace(uintptr_t entry, uintptr_t sp, uintptr_t bsp, uintptr_t uspace_uarg, uint64_t ipsr, uint64_t rsc); |
#endif |
/kernel/trunk/arch/ia64/include/mm/page.h |
---|
27,7 → 27,7 |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup ia64mm |
/** @addtogroup ia64mm |
* @{ |
*/ |
/** @file |
61,8 → 61,8 |
#define REGION_REGISTERS 8 |
#define KA2PA(x) ((__address) (x-(VRN_KERNEL<<VRN_SHIFT))) |
#define PA2KA(x) ((__address) (x+(VRN_KERNEL<<VRN_SHIFT))) |
#define KA2PA(x) ((uintptr_t) (x-(VRN_KERNEL<<VRN_SHIFT))) |
#define PA2KA(x) ((uintptr_t) (x+(VRN_KERNEL<<VRN_SHIFT))) |
#define VHPT_WIDTH 20 /* 1M */ |
#define VHPT_SIZE (1 << VHPT_WIDTH) |
126,7 → 126,7 |
union vhpt_tag tag; |
/* Word 3 */ |
__u64 ig3 : 64; |
uint64_t ig3 : 64; |
} __attribute__ ((packed)); |
struct vhpt_entry_not_present { |
144,13 → 144,13 |
union vhpt_tag tag; |
/* Word 3 */ |
__u64 ig3 : 64; |
uint64_t ig3 : 64; |
} __attribute__ ((packed)); |
typedef union vhpt_entry { |
struct vhpt_entry_present present; |
struct vhpt_entry_not_present not_present; |
__u64 word[4]; |
uint64_t word[4]; |
} vhpt_entry_t; |
struct region_register_map { |
177,7 → 177,7 |
typedef union pta_register { |
struct pta_register_map map; |
__u64 word; |
uint64_t word; |
} pta_register; |
/** Return Translation Hashed Entry Address. |
189,9 → 189,9 |
* |
* @return Address of the head of VHPT collision chain. |
*/ |
static inline __u64 thash(__u64 va) |
static inline uint64_t thash(uint64_t va) |
{ |
__u64 ret; |
uint64_t ret; |
__asm__ volatile ("thash %0 = %1\n" : "=r" (ret) : "r" (va)); |
207,9 → 207,9 |
* |
* @return The unique tag for VPN and RID in the collision chain returned by thash(). |
*/ |
static inline __u64 ttag(__u64 va) |
static inline uint64_t ttag(uint64_t va) |
{ |
__u64 ret; |
uint64_t ret; |
__asm__ volatile ("ttag %0 = %1\n" : "=r" (ret) : "r" (va)); |
222,9 → 222,9 |
* |
* @return Current contents of rr[i]. |
*/ |
static inline __u64 rr_read(index_t i) |
static inline uint64_t rr_read(index_t i) |
{ |
__u64 ret; |
uint64_t ret; |
ASSERT(i < REGION_REGISTERS); |
__asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT)); |
return ret; |
235,7 → 235,7 |
* @param i Region register index. |
* @param v Value to be written to rr[i]. |
*/ |
static inline void rr_write(index_t i, __u64 v) |
static inline void rr_write(index_t i, uint64_t v) |
{ |
ASSERT(i < REGION_REGISTERS); |
__asm__ volatile ( |
249,9 → 249,9 |
* |
* @return Current value stored in PTA. |
*/ |
static inline __u64 pta_read(void) |
static inline uint64_t pta_read(void) |
{ |
__u64 ret; |
uint64_t ret; |
__asm__ volatile ("mov %0 = cr.pta\n" : "=r" (ret)); |
262,7 → 262,7 |
* |
* @param v New value to be stored in PTA. |
*/ |
static inline void pta_write(__u64 v) |
static inline void pta_write(uint64_t v) |
{ |
__asm__ volatile ("mov cr.pta = %0\n" : : "r" (v)); |
} |
269,9 → 269,9 |
extern void page_arch_init(void); |
extern vhpt_entry_t *vhpt_hash(__address page, asid_t asid); |
extern bool vhpt_compare(__address page, asid_t asid, vhpt_entry_t *v); |
extern void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags); |
extern vhpt_entry_t *vhpt_hash(uintptr_t page, asid_t asid); |
extern bool vhpt_compare(uintptr_t page, asid_t asid, vhpt_entry_t *v); |
extern void vhpt_set_record(vhpt_entry_t *v, uintptr_t page, asid_t asid, uintptr_t frame, int flags); |
#endif /* __ASM__ */ |
279,6 → 279,5 |
#endif |
/** @} |
/** @} |
*/ |
/kernel/trunk/arch/ia64/include/mm/asid.h |
---|
39,8 → 39,8 |
#include <arch/types.h> |
typedef __u16 asid_t; |
typedef __u32 rid_t; |
typedef uint16_t asid_t; |
typedef uint32_t rid_t; |
#endif /* __ASM__ */ |
/kernel/trunk/arch/ia64/include/mm/tlb.h |
---|
52,7 → 52,7 |
/** Portion of TLB insertion format data structure. */ |
union tlb_entry { |
__u64 word[2]; |
uint64_t word[2]; |
struct { |
/* Word 0 */ |
unsigned p : 1; /**< Present. */ |
76,27 → 76,27 |
} __attribute__ ((packed)); |
typedef union tlb_entry tlb_entry_t; |
extern void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc); |
extern void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry); |
extern void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry); |
extern void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc); |
extern void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry); |
extern void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry); |
extern void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr); |
extern void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr); |
extern void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr); |
extern void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr); |
extern void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr); |
extern void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr); |
extern void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr); |
extern void dtr_purge(__address page, count_t width); |
extern void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr); |
extern void dtr_purge(uintptr_t page, count_t width); |
extern void dtc_pte_copy(pte_t *t); |
extern void itc_pte_copy(pte_t *t); |
extern void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate); |
extern void alternate_data_tlb_fault(__u64 vector, istate_t *istate); |
extern void data_nested_tlb_fault(__u64 vector, istate_t *istate); |
extern void data_dirty_bit_fault(__u64 vector, istate_t *istate); |
extern void instruction_access_bit_fault(__u64 vector, istate_t *istate); |
extern void data_access_bit_fault(__u64 vector, istate_t *istate); |
extern void page_not_present(__u64 vector, istate_t *istate); |
extern void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate); |
extern void alternate_data_tlb_fault(uint64_t vector, istate_t *istate); |
extern void data_nested_tlb_fault(uint64_t vector, istate_t *istate); |
extern void data_dirty_bit_fault(uint64_t vector, istate_t *istate); |
extern void instruction_access_bit_fault(uint64_t vector, istate_t *istate); |
extern void data_access_bit_fault(uint64_t vector, istate_t *istate); |
extern void page_not_present(uint64_t vector, istate_t *istate); |
#endif |
/kernel/trunk/arch/ia64/include/mm/vhpt.h |
---|
39,7 → 39,7 |
#include <arch/mm/tlb.h> |
#include <arch/mm/page.h> |
__address vhpt_set_up(void); |
uintptr_t vhpt_set_up(void); |
static inline vhpt_entry_t tlb_entry_t2vhpt_entry_t(tlb_entry_t tentry) |
{ |
51,7 → 51,7 |
return ventry; |
} |
void vhpt_mapping_insert(__address va, asid_t asid, tlb_entry_t entry); |
void vhpt_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry); |
void vhpt_invalidate_all(void); |
void vhpt_invalidate_asid(asid_t asid); |
/kernel/trunk/arch/ia64/include/context.h |
---|
56,10 → 56,10 |
/* RSE stack starts at the bottom of memory stack. */ |
#define context_set(c, _pc, stack, size) \ |
do { \ |
(c)->pc = (__address) _pc; \ |
(c)->bsp = ((__address) stack) + ALIGN_UP((size), REGISTER_STACK_ALIGNMENT); \ |
(c)->pc = (uintptr_t) _pc; \ |
(c)->bsp = ((uintptr_t) stack) + ALIGN_UP((size), REGISTER_STACK_ALIGNMENT); \ |
(c)->ar_pfs &= PFM_MASK; \ |
(c)->sp = ((__address) stack) + ALIGN_UP((size), STACK_ALIGNMENT) - SP_DELTA; \ |
(c)->sp = ((uintptr_t) stack) + ALIGN_UP((size), STACK_ALIGNMENT) - SP_DELTA; \ |
} while (0); |
/* |
71,39 → 71,39 |
/* |
* Application registers |
*/ |
__u64 ar_pfs; |
__u64 ar_unat_caller; |
__u64 ar_unat_callee; |
__u64 ar_rsc; |
__address bsp; /* ar_bsp */ |
__u64 ar_rnat; |
__u64 ar_lc; |
uint64_t ar_pfs; |
uint64_t ar_unat_caller; |
uint64_t ar_unat_callee; |
uint64_t ar_rsc; |
uintptr_t bsp; /* ar_bsp */ |
uint64_t ar_rnat; |
uint64_t ar_lc; |
/* |
* General registers |
*/ |
__u64 r1; |
__u64 r4; |
__u64 r5; |
__u64 r6; |
__u64 r7; |
__address sp; /* r12 */ |
__u64 r13; |
uint64_t r1; |
uint64_t r4; |
uint64_t r5; |
uint64_t r6; |
uint64_t r7; |
uintptr_t sp; /* r12 */ |
uint64_t r13; |
/* |
* Branch registers |
*/ |
__address pc; /* b0 */ |
__u64 b1; |
__u64 b2; |
__u64 b3; |
__u64 b4; |
__u64 b5; |
uintptr_t pc; /* b0 */ |
uint64_t b1; |
uint64_t b2; |
uint64_t b3; |
uint64_t b4; |
uint64_t b5; |
/* |
* Predicate registers |
*/ |
__u64 pr; |
uint64_t pr; |
__r128 f2 __attribute__ ((aligned(16))); |
__r128 f3; |
/kernel/trunk/arch/ia64/include/register.h |
---|
136,7 → 136,7 |
/** Processor Status Register. */ |
union psr { |
__u64 value; |
uint64_t value; |
struct { |
unsigned : 1; |
unsigned be : 1; /**< Big-Endian data accesses. */ |
179,7 → 179,7 |
/** Register Stack Configuration Register */ |
union rsc { |
__u64 value; |
uint64_t value; |
struct { |
unsigned mode : 2; |
unsigned pl : 2; /**< Privilege Level. */ |
192,8 → 192,8 |
/** External Interrupt Vector Register */ |
union cr_ivr { |
__u8 vector; |
__u64 value; |
uint8_t vector; |
uint64_t value; |
}; |
typedef union cr_ivr cr_ivr_t; |
206,7 → 206,7 |
unsigned : 8; |
unsigned mmi: 1; /**< Mask Maskable Interrupts. */ |
} __attribute__ ((packed)); |
__u64 value; |
uint64_t value; |
}; |
typedef union cr_tpr cr_tpr_t; |
220,7 → 220,7 |
unsigned : 3; |
unsigned m : 1; /**< Mask. */ |
} __attribute__ ((packed)); |
__u64 value; |
uint64_t value; |
}; |
typedef union cr_itv cr_itv_t; |
234,9 → 234,9 |
unsigned ge_na : 4; |
unsigned ge_code : 4; |
} __attribute__ ((packed)); |
__u16 code; |
uint16_t code; |
}; |
__u8 vector; |
uint8_t vector; |
unsigned : 8; |
unsigned x : 1; /**< Execute exception. */ |
unsigned w : 1; /**< Write exception. */ |
251,7 → 251,7 |
unsigned ed : 1; /**< Exception Deferral. */ |
unsigned : 20; |
} __attribute__ ((packed)); |
__u64 value; |
uint64_t value; |
}; |
typedef union cr_isr cr_isr_t; |
259,13 → 259,13 |
/** CPUID Register 3 */ |
union cpuid3 { |
struct { |
__u8 number; |
__u8 revision; |
__u8 model; |
__u8 family; |
__u8 archrev; |
uint8_t number; |
uint8_t revision; |
uint8_t model; |
uint8_t family; |
uint8_t archrev; |
} __attribute__ ((packed)); |
__u64 value; |
uint64_t value; |
}; |
typedef union cpuid3 cpuid3_t; |
/kernel/trunk/arch/ia64/include/cpu.h |
---|
43,8 → 43,8 |
#define FAMILY_ITANIUM2 0x1f |
struct cpu_arch { |
__u64 cpuid0; |
__u64 cpuid1; |
uint64_t cpuid0; |
uint64_t cpuid1; |
cpuid3_t cpuid3; |
}; |
54,9 → 54,9 |
* |
* @return Value of CPUID[n] register. |
*/ |
static inline __u64 cpuid_read(int n) |
static inline uint64_t cpuid_read(int n) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile ("mov %0 = cpuid[%1]\n" : "=r" (v) : "r" (n)); |
/kernel/trunk/arch/ia64/src/fpu_context.c |
---|
282,7 → 282,7 |
void fpu_enable(void) |
{ |
__u64 a = 0 ; |
uint64_t a = 0 ; |
asm volatile( |
"rsm %0;;" |
"srlz.i\n" |
304,7 → 304,7 |
void fpu_disable(void) |
{ |
__u64 a = 0 ; |
uint64_t a = 0 ; |
asm volatile( |
"ssm %0;;\n" |
"srlz.i\n" |
325,7 → 325,7 |
void fpu_init(void) |
{ |
__u64 a = 0 ; |
uint64_t a = 0 ; |
asm volatile |
( |
"mov %0=ar.fpsr;;\n" |
/kernel/trunk/arch/ia64/src/ddi/ddi.c |
---|
47,7 → 47,7 |
* |
* @return 0 on success or an error code from errno.h. |
*/ |
int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size) |
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) |
{ |
return 0; |
} |
/kernel/trunk/arch/ia64/src/proc/scheduler.c |
---|
50,11 → 50,11 |
/** Prepare kernel stack pointers in bank 0 r22 and r23 and make sure the stack is mapped in DTR. */ |
void before_thread_runs_arch(void) |
{ |
__address base; |
uintptr_t base; |
base = ALIGN_DOWN(config.base, 1<<KERNEL_PAGE_WIDTH); |
if ((__address) THREAD->kstack < base || (__address) THREAD->kstack > base + (1<<(KERNEL_PAGE_WIDTH))) { |
if ((uintptr_t) THREAD->kstack < base || (uintptr_t) THREAD->kstack > base + (1<<(KERNEL_PAGE_WIDTH))) { |
/* |
* Kernel stack of this thread is not mapped by DTR[TR_KERNEL]. |
* Use DTR[TR_KSTACK1] and DTR[TR_KSTACK2] to map it. |
61,11 → 61,11 |
*/ |
/* purge DTR[TR_STACK1] and DTR[TR_STACK2] */ |
dtr_purge((__address) THREAD->kstack, PAGE_WIDTH+1); |
dtr_purge((uintptr_t) THREAD->kstack, PAGE_WIDTH+1); |
/* insert DTR[TR_STACK1] and DTR[TR_STACK2] */ |
dtlb_kernel_mapping_insert((__address) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK1); |
dtlb_kernel_mapping_insert((__address) THREAD->kstack + PAGE_SIZE, KA2PA(THREAD->kstack) + FRAME_SIZE, true, DTR_KSTACK2); |
dtlb_kernel_mapping_insert((uintptr_t) THREAD->kstack, KA2PA(THREAD->kstack), true, DTR_KSTACK1); |
dtlb_kernel_mapping_insert((uintptr_t) THREAD->kstack + PAGE_SIZE, KA2PA(THREAD->kstack) + FRAME_SIZE, true, DTR_KSTACK2); |
} |
/* |
/kernel/trunk/arch/ia64/src/ia64.c |
---|
77,7 → 77,7 |
void arch_pre_mm_init(void) |
{ |
/* Set Interruption Vector Address (i.e. location of interruption vector table). */ |
iva_write((__address) &ivt); |
iva_write((uintptr_t) &ivt); |
srlz_d(); |
ski_init_console(); |
116,10 → 116,10 |
rsc.pl = PL_USER; |
rsc.mode = 3; /* eager mode */ |
switch_to_userspace((__address) kernel_uarg->uspace_entry, |
((__address) kernel_uarg->uspace_stack)+PAGE_SIZE-ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT), |
((__address) kernel_uarg->uspace_stack)+PAGE_SIZE, |
(__address) kernel_uarg->uspace_uarg, |
switch_to_userspace((uintptr_t) kernel_uarg->uspace_entry, |
((uintptr_t) kernel_uarg->uspace_stack)+PAGE_SIZE-ALIGN_UP(STACK_ITEM_SIZE, STACK_ALIGNMENT), |
((uintptr_t) kernel_uarg->uspace_stack)+PAGE_SIZE, |
(uintptr_t) kernel_uarg->uspace_uarg, |
psr.value, rsc.value); |
while (1) { |
131,7 → 131,7 |
* |
* We use r13 (a.k.a. tp) for this purpose. |
*/ |
__native sys_tls_set(__native addr) |
unative_t sys_tls_set(unative_t addr) |
{ |
return 0; |
} |
/kernel/trunk/arch/ia64/src/ski/ski.c |
---|
44,7 → 44,7 |
int kbd_uspace=0; |
static void ski_putchar(chardev_t *d, const char ch); |
static __s32 ski_getchar(void); |
static int32_t ski_getchar(void); |
/** Display character on debug console |
* |
78,9 → 78,9 |
* |
* @return ASCII code of pressed key or 0 if no key pressed. |
*/ |
__s32 ski_getchar(void) |
int32_t ski_getchar(void) |
{ |
__u64 ch; |
uint64_t ch; |
__asm__ volatile ( |
"mov r15=%1\n" |
92,7 → 92,7 |
: "r15", "r8" |
); |
return (__s32) ch; |
return (int32_t) ch; |
} |
/** |
/kernel/trunk/arch/ia64/src/cpu/cpu.c |
---|
51,10 → 51,10 |
void cpu_print_report(cpu_t *m) |
{ |
char *family_str; |
char vendor[2*sizeof(__u64)+1]; |
char vendor[2*sizeof(uint64_t)+1]; |
*((__u64 *) &vendor[0*sizeof(__u64)]) = CPU->arch.cpuid0; |
*((__u64 *) &vendor[1*sizeof(__u64)]) = CPU->arch.cpuid1; |
*((uint64_t *) &vendor[0*sizeof(uint64_t)]) = CPU->arch.cpuid0; |
*((uint64_t *) &vendor[1*sizeof(uint64_t)]) = CPU->arch.cpuid1; |
vendor[sizeof(vendor)-1] = '\0'; |
switch(m->arch.cpuid3.family) { |
/kernel/trunk/arch/ia64/src/mm/tlb.c |
---|
57,8 → 57,8 |
void tlb_invalidate_all(void) |
{ |
ipl_t ipl; |
__address adr; |
__u32 count1, count2, stride1, stride2; |
uintptr_t adr; |
uint32_t count1, count2, stride1, stride2; |
int i,j; |
101,7 → 101,7 |
} |
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt) |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
region_register rr; |
bool restore_rr = false; |
108,7 → 108,7 |
int b = 0; |
int c = cnt; |
__address va; |
uintptr_t va; |
va = page; |
rr.word = rr_read(VA2VRN(va)); |
129,7 → 129,7 |
while(c >>= 1) |
b++; |
b >>= 1; |
__u64 ps; |
uint64_t ps; |
switch (b) { |
case 0: /*cnt 1-3*/ |
201,7 → 201,7 |
* @param asid Address space identifier. |
* @param entry The rest of TLB entry as required by TLB insertion format. |
*/ |
void dtc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry) |
void dtc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) |
{ |
tc_mapping_insert(va, asid, entry, true); |
} |
212,7 → 212,7 |
* @param asid Address space identifier. |
* @param entry The rest of TLB entry as required by TLB insertion format. |
*/ |
void itc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry) |
void itc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) |
{ |
tc_mapping_insert(va, asid, entry, false); |
} |
224,7 → 224,7 |
* @param entry The rest of TLB entry as required by TLB insertion format. |
* @param dtc If true, insert into data translation cache, use instruction translation cache otherwise. |
*/ |
void tc_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtc) |
void tc_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtc) |
{ |
region_register rr; |
bool restore_rr = false; |
275,7 → 275,7 |
* @param entry The rest of TLB entry as required by TLB insertion format. |
* @param tr Translation register. |
*/ |
void itr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr) |
void itr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr) |
{ |
tr_mapping_insert(va, asid, entry, false, tr); |
} |
287,7 → 287,7 |
* @param entry The rest of TLB entry as required by TLB insertion format. |
* @param tr Translation register. |
*/ |
void dtr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, index_t tr) |
void dtr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, index_t tr) |
{ |
tr_mapping_insert(va, asid, entry, true, tr); |
} |
300,7 → 300,7 |
* @param dtr If true, insert into data translation register, use instruction translation register otherwise. |
* @param tr Translation register. |
*/ |
void tr_mapping_insert(__address va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr) |
void tr_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry, bool dtr, index_t tr) |
{ |
region_register rr; |
bool restore_rr = false; |
351,7 → 351,7 |
* @param dtr If true, insert into data translation register, use data translation cache otherwise. |
* @param tr Translation register if dtr is true, ignored otherwise. |
*/ |
void dtlb_kernel_mapping_insert(__address page, __address frame, bool dtr, index_t tr) |
void dtlb_kernel_mapping_insert(uintptr_t page, uintptr_t frame, bool dtr, index_t tr) |
{ |
tlb_entry_t entry; |
380,7 → 380,7 |
* @param page Virtual page address including VRN bits. |
* @param width Width of the purge in bits. |
*/ |
void dtr_purge(__address page, count_t width) |
void dtr_purge(uintptr_t page, count_t width) |
{ |
__asm__ volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width<<2)); |
} |
444,11 → 444,11 |
* @param vector Interruption vector. |
* @param istate Structure with saved interruption state. |
*/ |
void alternate_instruction_tlb_fault(__u64 vector, istate_t *istate) |
void alternate_instruction_tlb_fault(uint64_t vector, istate_t *istate) |
{ |
region_register rr; |
rid_t rid; |
__address va; |
uintptr_t va; |
pte_t *t; |
va = istate->cr_ifa; /* faulting address */ |
481,11 → 481,11 |
* @param vector Interruption vector. |
* @param istate Structure with saved interruption state. |
*/ |
void alternate_data_tlb_fault(__u64 vector, istate_t *istate) |
void alternate_data_tlb_fault(uint64_t vector, istate_t *istate) |
{ |
region_register rr; |
rid_t rid; |
__address va; |
uintptr_t va; |
pte_t *t; |
va = istate->cr_ifa; /* faulting address */ |
530,7 → 530,7 |
* @param vector Interruption vector. |
* @param istate Structure with saved interruption state. |
*/ |
void data_nested_tlb_fault(__u64 vector, istate_t *istate) |
void data_nested_tlb_fault(uint64_t vector, istate_t *istate) |
{ |
panic("%s\n", __FUNCTION__); |
} |
540,11 → 540,11 |
* @param vector Interruption vector. |
* @param istate Structure with saved interruption state. |
*/ |
void data_dirty_bit_fault(__u64 vector, istate_t *istate) |
void data_dirty_bit_fault(uint64_t vector, istate_t *istate) |
{ |
region_register rr; |
rid_t rid; |
__address va; |
uintptr_t va; |
pte_t *t; |
va = istate->cr_ifa; /* faulting address */ |
577,11 → 577,11 |
* @param vector Interruption vector. |
* @param istate Structure with saved interruption state. |
*/ |
void instruction_access_bit_fault(__u64 vector, istate_t *istate) |
void instruction_access_bit_fault(uint64_t vector, istate_t *istate) |
{ |
region_register rr; |
rid_t rid; |
__address va; |
uintptr_t va; |
pte_t *t; |
va = istate->cr_ifa; /* faulting address */ |
614,11 → 614,11 |
* @param vector Interruption vector. |
* @param istate Structure with saved interruption state. |
*/ |
void data_access_bit_fault(__u64 vector, istate_t *istate) |
void data_access_bit_fault(uint64_t vector, istate_t *istate) |
{ |
region_register rr; |
rid_t rid; |
__address va; |
uintptr_t va; |
pte_t *t; |
va = istate->cr_ifa; /* faulting address */ |
651,11 → 651,11 |
* @param vector Interruption vector. |
* @param istate Structure with saved interruption state. |
*/ |
void page_not_present(__u64 vector, istate_t *istate) |
void page_not_present(uint64_t vector, istate_t *istate) |
{ |
region_register rr; |
rid_t rid; |
__address va; |
uintptr_t va; |
pte_t *t; |
va = istate->cr_ifa; /* faulting address */ |
/kernel/trunk/arch/ia64/src/mm/vhpt.c |
---|
40,22 → 40,22 |
static vhpt_entry_t* vhpt_base; |
__address vhpt_set_up(void) |
uintptr_t vhpt_set_up(void) |
{ |
vhpt_base = frame_alloc(VHPT_WIDTH-FRAME_WIDTH,FRAME_KA | FRAME_ATOMIC); |
if(!vhpt_base) |
panic("Kernel configured with VHPT but no memory for table."); |
vhpt_invalidate_all(); |
return (__address) vhpt_base; |
return (uintptr_t) vhpt_base; |
} |
void vhpt_mapping_insert(__address va, asid_t asid, tlb_entry_t entry) |
void vhpt_mapping_insert(uintptr_t va, asid_t asid, tlb_entry_t entry) |
{ |
region_register rr_save, rr; |
index_t vrn; |
rid_t rid; |
__u64 tag; |
uint64_t tag; |
vhpt_entry_t *ventry; |
84,7 → 84,7 |
void vhpt_invalidate_all() |
{ |
memsetb((__address)vhpt_base,1<<VHPT_WIDTH,0); |
memsetb((uintptr_t)vhpt_base,1<<VHPT_WIDTH,0); |
} |
void vhpt_invalidate_asid(asid_t asid) |
/kernel/trunk/arch/ia64/src/mm/page.c |
---|
66,7 → 66,7 |
pta_register pta; |
int i; |
#ifdef CONFIG_VHPT |
__address vhpt_base; |
uintptr_t vhpt_base; |
#endif |
/* |
128,7 → 128,7 |
* |
* @return VHPT entry address. |
*/ |
vhpt_entry_t *vhpt_hash(__address page, asid_t asid) |
vhpt_entry_t *vhpt_hash(uintptr_t page, asid_t asid) |
{ |
region_register rr_save, rr; |
index_t vrn; |
172,7 → 172,7 |
* |
* @return True if page and asid match the page and asid of t, false otherwise. |
*/ |
bool vhpt_compare(__address page, asid_t asid, vhpt_entry_t *v) |
bool vhpt_compare(uintptr_t page, asid_t asid, vhpt_entry_t *v) |
{ |
region_register rr_save, rr; |
index_t vrn; |
216,12 → 216,12 |
* @param frame Physical address of the frame to wich page is mapped. |
* @param flags Different flags for the mapping. |
*/ |
void vhpt_set_record(vhpt_entry_t *v, __address page, asid_t asid, __address frame, int flags) |
void vhpt_set_record(vhpt_entry_t *v, uintptr_t page, asid_t asid, uintptr_t frame, int flags) |
{ |
region_register rr_save, rr; |
index_t vrn; |
rid_t rid; |
__u64 tag; |
uint64_t tag; |
ASSERT(v); |
/kernel/trunk/arch/ia64/src/interrupt.c |
---|
119,10 → 119,10 |
"Reserved" |
}; |
static char *vector_to_string(__u16 vector); |
static char *vector_to_string(uint16_t vector); |
static void dump_interrupted_context(istate_t *istate); |
char *vector_to_string(__u16 vector) |
char *vector_to_string(uint16_t vector) |
{ |
ASSERT(vector <= VECTOR_MAX); |
152,7 → 152,7 |
printf("cr.ifa=%#018llx\t(%s)\n", istate->cr_ifa, ifa); |
} |
void general_exception(__u64 vector, istate_t *istate) |
void general_exception(uint64_t vector, istate_t *istate) |
{ |
char *desc = ""; |
188,19 → 188,19 |
void fpu_enable(void); |
void disabled_fp_register(__u64 vector, istate_t *istate) |
void disabled_fp_register(uint64_t vector, istate_t *istate) |
{ |
#ifdef CONFIG_FPU_LAZY |
scheduler_fpu_lazy_request(); |
#else |
fault_if_from_uspace(istate, "Interruption: %#hx (%s)", (__u16) vector, vector_to_string(vector)); |
fault_if_from_uspace(istate, "Interruption: %#hx (%s)", (uint16_t) vector, vector_to_string(vector)); |
dump_interrupted_context(istate); |
panic("Interruption: %#hx (%s)\n", (__u16) vector, vector_to_string(vector)); |
panic("Interruption: %#hx (%s)\n", (uint16_t) vector, vector_to_string(vector)); |
#endif |
} |
void nop_handler(__u64 vector, istate_t *istate) |
void nop_handler(uint64_t vector, istate_t *istate) |
{ |
} |
207,7 → 207,7 |
/** Handle syscall. */ |
int break_instruction(__u64 vector, istate_t *istate) |
int break_instruction(uint64_t vector, istate_t *istate) |
{ |
/* |
* Move to next instruction after BREAK. |
227,14 → 227,14 |
return -1; |
} |
void universal_handler(__u64 vector, istate_t *istate) |
void universal_handler(uint64_t vector, istate_t *istate) |
{ |
fault_if_from_uspace(istate,"Interruption: %#hx (%s)\n",(__u16) vector, vector_to_string(vector)); |
fault_if_from_uspace(istate,"Interruption: %#hx (%s)\n",(uint16_t) vector, vector_to_string(vector)); |
dump_interrupted_context(istate); |
panic("Interruption: %#hx (%s)\n", (__u16) vector, vector_to_string(vector)); |
panic("Interruption: %#hx (%s)\n", (uint16_t) vector, vector_to_string(vector)); |
} |
void external_interrupt(__u64 vector, istate_t *istate) |
void external_interrupt(uint64_t vector, istate_t *istate) |
{ |
cr_ivr_t ivr; |
254,7 → 254,7 |
} |
} |
void virtual_interrupt(__u64 irq,void *param) |
void virtual_interrupt(uint64_t irq,void *param) |
{ |
switch(irq) { |
case IRQ_KBD: |
267,7 → 267,7 |
} |
/* Reregister irq to be IPC-ready */ |
void irq_ipc_bind_arch(__native irq) |
void irq_ipc_bind_arch(unative_t irq) |
{ |
if(irq==IRQ_KBD) { |
kbd_uspace=1; |
/kernel/trunk/arch/ia64/src/drivers/it.c |
---|
71,8 → 71,8 |
/** Process Interval Timer interrupt. */ |
void it_interrupt(void) |
{ |
__s64 c; |
__s64 m; |
int64_t c; |
int64_t m; |
eoi_write(EOI); |
/kernel/trunk/arch/ppc32/include/exception.h |
---|
42,47 → 42,47 |
#include <typedefs.h> |
struct istate { |
__u32 r0; |
__u32 r2; |
__u32 r3; |
__u32 r4; |
__u32 r5; |
__u32 r6; |
__u32 r7; |
__u32 r8; |
__u32 r9; |
__u32 r10; |
__u32 r11; |
__u32 r13; |
__u32 r14; |
__u32 r15; |
__u32 r16; |
__u32 r17; |
__u32 r18; |
__u32 r19; |
__u32 r20; |
__u32 r21; |
__u32 r22; |
__u32 r23; |
__u32 r24; |
__u32 r25; |
__u32 r26; |
__u32 r27; |
__u32 r28; |
__u32 r29; |
__u32 r30; |
__u32 r31; |
__u32 cr; |
__u32 pc; |
__u32 srr1; |
__u32 lr; |
__u32 ctr; |
__u32 xer; |
__u32 r12; |
__u32 sp; |
uint32_t r0; |
uint32_t r2; |
uint32_t r3; |
uint32_t r4; |
uint32_t r5; |
uint32_t r6; |
uint32_t r7; |
uint32_t r8; |
uint32_t r9; |
uint32_t r10; |
uint32_t r11; |
uint32_t r13; |
uint32_t r14; |
uint32_t r15; |
uint32_t r16; |
uint32_t r17; |
uint32_t r18; |
uint32_t r19; |
uint32_t r20; |
uint32_t r21; |
uint32_t r22; |
uint32_t r23; |
uint32_t r24; |
uint32_t r25; |
uint32_t r26; |
uint32_t r27; |
uint32_t r28; |
uint32_t r29; |
uint32_t r30; |
uint32_t r31; |
uint32_t cr; |
uint32_t pc; |
uint32_t srr1; |
uint32_t lr; |
uint32_t ctr; |
uint32_t xer; |
uint32_t r12; |
uint32_t sp; |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr) |
{ |
istate->pc = retaddr; |
} |
93,7 → 93,7 |
panic("istate_from_uspace not yet implemented"); |
return 0; |
} |
static inline __native istate_get_pc(istate_t *istate) |
static inline unative_t istate_get_pc(istate_t *istate) |
{ |
return istate->pc; |
} |
/kernel/trunk/arch/ppc32/include/fpu_context.h |
---|
40,25 → 40,25 |
#endif |
struct fpu_context { |
__u64 fr14; |
__u64 fr15; |
__u64 fr16; |
__u64 fr17; |
__u64 fr18; |
__u64 fr19; |
__u64 fr20; |
__u64 fr21; |
__u64 fr22; |
__u64 fr23; |
__u64 fr24; |
__u64 fr25; |
__u64 fr26; |
__u64 fr27; |
__u64 fr28; |
__u64 fr29; |
__u64 fr30; |
__u64 fr31; |
__u32 fpscr; |
uint64_t fr14; |
uint64_t fr15; |
uint64_t fr16; |
uint64_t fr17; |
uint64_t fr18; |
uint64_t fr19; |
uint64_t fr20; |
uint64_t fr21; |
uint64_t fr22; |
uint64_t fr23; |
uint64_t fr24; |
uint64_t fr25; |
uint64_t fr26; |
uint64_t fr27; |
uint64_t fr28; |
uint64_t fr29; |
uint64_t fr30; |
uint64_t fr31; |
uint32_t fpscr; |
} __attribute__ ((packed)); |
#endif |
/kernel/trunk/arch/ppc32/include/byteorder.h |
---|
40,24 → 40,24 |
#define BIG_ENDIAN |
static inline __u64 __u64_le2host(__u64 n) |
static inline uint64_t uint64_t_le2host(uint64_t n) |
{ |
return __u64_byteorder_swap(n); |
return uint64_t_byteorder_swap(n); |
} |
/** Convert little-endian __native to host __native |
/** Convert little-endian unative_t to host unative_t |
* |
* Convert little-endian __native parameter to host endianess. |
* Convert little-endian unative_t parameter to host endianess. |
* |
* @param n Little-endian __native argument. |
* @param n Little-endian unative_t argument. |
* |
* @return Result in host endianess. |
* |
*/ |
static inline __native __native_le2host(__native n) |
static inline unative_t unative_t_le2host(unative_t n) |
{ |
__address v; |
uintptr_t v; |
asm volatile ( |
"lwbrx %0, %1, %2\n" |
/kernel/trunk/arch/ppc32/include/cpuid.h |
---|
38,8 → 38,8 |
#include <arch/types.h> |
struct cpu_info { |
__u16 version; |
__u16 revision; |
uint16_t version; |
uint16_t revision; |
} __attribute__ ((packed)); |
static inline void cpu_version(struct cpu_info *info) |
/kernel/trunk/arch/ppc32/include/types.h |
---|
37,22 → 37,22 |
#define NULL 0 |
typedef signed char __s8; |
typedef signed short __s16; |
typedef signed int __s32; |
typedef signed long long __s64; |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed int int32_t; |
typedef signed long long int64_t; |
typedef unsigned char __u8; |
typedef unsigned short __u16; |
typedef unsigned int __u32; |
typedef unsigned long long __u64; |
typedef unsigned char uint8_t; |
typedef unsigned short uint16_t; |
typedef unsigned int uint32_t; |
typedef unsigned long long uint64_t; |
typedef __u32 __address; |
typedef __u32 pfn_t; |
typedef uint32_t uintptr_t; |
typedef uint32_t pfn_t; |
typedef __u32 ipl_t; |
typedef uint32_t ipl_t; |
typedef __u32 __native; |
typedef uint32_t unative_t; |
/** Page Table Entry. */ |
typedef struct { |
/kernel/trunk/arch/ppc32/include/memstr.h |
---|
37,10 → 37,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(__address dst, size_t cnt, __u16 x); |
extern void memsetb(__address dst, size_t cnt, __u8 x); |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern int memcmp(__address src, __address dst, int cnt); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
#endif |
/kernel/trunk/arch/ppc32/include/boot/boot.h |
---|
48,28 → 48,28 |
#include <arch/types.h> |
typedef struct { |
__address addr; |
__u32 size; |
uintptr_t addr; |
uint32_t size; |
} utask_t; |
typedef struct { |
__u32 count; |
uint32_t count; |
utask_t tasks[TASKMAP_MAX_RECORDS]; |
} taskmap_t; |
typedef struct { |
__address start; |
__u32 size; |
uintptr_t start; |
uint32_t size; |
} memzone_t; |
typedef struct { |
__u32 total; |
__u32 count; |
uint32_t total; |
uint32_t count; |
memzone_t zones[MEMMAP_MAX_RECORDS]; |
} memmap_t; |
typedef struct { |
__address addr; |
uintptr_t addr; |
unsigned int width; |
unsigned int height; |
unsigned int bpp; |
77,7 → 77,7 |
} screen_t; |
typedef struct { |
__address addr; |
uintptr_t addr; |
unsigned int size; |
} keyboard_t; |
/kernel/trunk/arch/ppc32/include/asm.h |
---|
128,9 → 128,9 |
* The stack is assumed to be STACK_SIZE bytes long. |
* The stack must start on page boundary. |
*/ |
static inline __address get_stack_base(void) |
static inline uintptr_t get_stack_base(void) |
{ |
__address v; |
uintptr_t v; |
asm volatile ( |
"and %0, %%sp, %1\n" |
145,9 → 145,9 |
} |
void cpu_halt(void); |
void asm_delay_loop(__u32 t); |
void asm_delay_loop(uint32_t t); |
extern void userspace_asm(__address uspace_uarg, __address stack, __address entry); |
extern void userspace_asm(uintptr_t uspace_uarg, uintptr_t stack, uintptr_t entry); |
#endif |
/kernel/trunk/arch/ppc32/include/faddr.h |
---|
37,7 → 37,7 |
#include <arch/types.h> |
#define FADDR(fptr) ((__address) (fptr)) |
#define FADDR(fptr) ((uintptr_t) (fptr)) |
#endif |
/kernel/trunk/arch/ppc32/include/mm/frame.h |
---|
43,7 → 43,7 |
#include <arch/types.h> |
extern __address last_frame; |
extern uintptr_t last_frame; |
extern void frame_arch_init(void); |
/kernel/trunk/arch/ppc32/include/mm/page.h |
---|
43,8 → 43,8 |
#ifdef KERNEL |
#ifndef __ASM__ |
# define KA2PA(x) (((__address) (x)) - 0x80000000) |
# define PA2KA(x) (((__address) (x)) + 0x80000000) |
# define KA2PA(x) (((uintptr_t) (x)) - 0x80000000) |
# define PA2KA(x) (((uintptr_t) (x)) + 0x80000000) |
#else |
# define KA2PA(x) ((x) - 0x80000000) |
# define PA2KA(x) ((x) + 0x80000000) |
94,7 → 94,7 |
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) |
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x)) |
#define PTE_VALID_ARCH(pte) (*((__u32 *) (pte)) != 0) |
#define PTE_VALID_ARCH(pte) (*((uint32_t *) (pte)) != 0) |
#define PTE_PRESENT_ARCH(pte) ((pte)->p != 0) |
#define PTE_GET_FRAME_ARCH(pte) ((pte)->pfn << 12) |
#define PTE_WRITABLE_ARCH(pte) 1 |
/kernel/trunk/arch/ppc32/include/mm/asid.h |
---|
39,7 → 39,7 |
#define ASID_MAX_ARCH 4096 |
typedef __u32 asid_t; |
typedef uint32_t asid_t; |
#endif |
/kernel/trunk/arch/ppc32/include/context.h |
---|
42,31 → 42,31 |
#define SP_DELTA 16 |
struct context { |
__address sp; |
__address pc; |
uintptr_t sp; |
uintptr_t pc; |
__u32 r2; |
__u32 r13; |
__u32 r14; |
__u32 r15; |
__u32 r16; |
__u32 r17; |
__u32 r18; |
__u32 r19; |
__u32 r20; |
__u32 r21; |
__u32 r22; |
__u32 r23; |
__u32 r24; |
__u32 r25; |
__u32 r26; |
__u32 r27; |
__u32 r28; |
__u32 r29; |
__u32 r30; |
__u32 r31; |
uint32_t r2; |
uint32_t r13; |
uint32_t r14; |
uint32_t r15; |
uint32_t r16; |
uint32_t r17; |
uint32_t r18; |
uint32_t r19; |
uint32_t r20; |
uint32_t r21; |
uint32_t r22; |
uint32_t r23; |
uint32_t r24; |
uint32_t r25; |
uint32_t r26; |
uint32_t r27; |
uint32_t r28; |
uint32_t r29; |
uint32_t r30; |
uint32_t r31; |
__u32 cr; |
uint32_t cr; |
ipl_t ipl; |
} __attribute__ ((packed)); |
/kernel/trunk/arch/ppc32/include/drivers/pic.h |
---|
42,7 → 42,7 |
#define PIC_ACK_LOW 10 |
#define PIC_ACK_HIGH 6 |
void pic_init(__address base, size_t size); |
void pic_init(uintptr_t base, size_t size); |
void pic_enable_interrupt(int intnum); |
void pic_disable_interrupt(int intnum); |
void pic_ack_interrupt(int intnum); |
/kernel/trunk/arch/ppc32/include/drivers/cuda.h |
---|
40,7 → 40,7 |
#define CUDA_IRQ 10 |
extern void cuda_init(__address base, size_t size); |
extern void cuda_init(uintptr_t base, size_t size); |
extern int cuda_get_scancode(void); |
extern void cuda_grab(void); |
extern void cuda_release(void); |
/kernel/trunk/arch/ppc32/src/ddi/ddi.c |
---|
47,7 → 47,7 |
* |
* @return 0 on success or an error code from errno.h. |
*/ |
int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size) |
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) |
{ |
return 0; |
} |
/kernel/trunk/arch/ppc32/src/ppc32.c |
---|
50,7 → 50,7 |
/* Setup usermode */ |
init.cnt = bootinfo.taskmap.count; |
__u32 i; |
uint32_t i; |
for (i = 0; i < bootinfo.taskmap.count; i++) { |
init.tasks[i].addr = PA2KA(bootinfo.taskmap.tasks[i].addr); |
97,7 → 97,7 |
void userspace(uspace_arg_t *kernel_uarg) |
{ |
userspace_asm((__address) kernel_uarg->uspace_uarg, (__address) kernel_uarg->uspace_stack + THREAD_STACK_SIZE - SP_DELTA, (__address) kernel_uarg->uspace_entry); |
userspace_asm((uintptr_t) kernel_uarg->uspace_uarg, (uintptr_t) kernel_uarg->uspace_stack + THREAD_STACK_SIZE - SP_DELTA, (uintptr_t) kernel_uarg->uspace_entry); |
/* Unreachable */ |
for (;;) |
/kernel/trunk/arch/ppc32/src/mm/tlb.c |
---|
56,7 → 56,7 |
* @return PTE on success, NULL otherwise. |
* |
*/ |
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access, istate_t *istate, int *pfrc) |
static pte_t *find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access, istate_t *istate, int *pfrc) |
{ |
/* |
* Check if the mapping exists in page tables. |
103,7 → 103,7 |
} |
static void pht_refill_fail(__address badvaddr, istate_t *istate) |
static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate) |
{ |
char *symbol = ""; |
char *sym2 = ""; |
118,12 → 118,12 |
} |
static void pht_insert(const __address vaddr, const pfn_t pfn) |
static void pht_insert(const uintptr_t vaddr, const pfn_t pfn) |
{ |
__u32 page = (vaddr >> 12) & 0xffff; |
__u32 api = (vaddr >> 22) & 0x3f; |
uint32_t page = (vaddr >> 12) & 0xffff; |
uint32_t api = (vaddr >> 22) & 0x3f; |
__u32 vsid; |
uint32_t vsid; |
asm volatile ( |
"mfsrin %0, %1\n" |
: "=r" (vsid) |
130,7 → 130,7 |
: "r" (vaddr) |
); |
__u32 sdr1; |
uint32_t sdr1; |
asm volatile ( |
"mfsdr1 %0\n" |
: "=r" (sdr1) |
138,10 → 138,10 |
phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000); |
/* Primary hash (xor) */ |
__u32 h = 0; |
__u32 hash = vsid ^ page; |
__u32 base = (hash & 0x3ff) << 3; |
__u32 i; |
uint32_t h = 0; |
uint32_t hash = vsid ^ page; |
uint32_t base = (hash & 0x3ff) << 3; |
uint32_t i; |
bool found = false; |
/* Find unused or colliding |
155,7 → 155,7 |
if (!found) { |
/* Secondary hash (not) */ |
__u32 base2 = (~hash & 0x3ff) << 3; |
uint32_t base2 = (~hash & 0x3ff) << 3; |
/* Find unused or colliding |
PTE in PTEG */ |
185,12 → 185,12 |
} |
static void pht_real_insert(const __address vaddr, const pfn_t pfn) |
static void pht_real_insert(const uintptr_t vaddr, const pfn_t pfn) |
{ |
__u32 page = (vaddr >> 12) & 0xffff; |
__u32 api = (vaddr >> 22) & 0x3f; |
uint32_t page = (vaddr >> 12) & 0xffff; |
uint32_t api = (vaddr >> 22) & 0x3f; |
__u32 vsid; |
uint32_t vsid; |
asm volatile ( |
"mfsrin %0, %1\n" |
: "=r" (vsid) |
197,7 → 197,7 |
: "r" (vaddr) |
); |
__u32 sdr1; |
uint32_t sdr1; |
asm volatile ( |
"mfsdr1 %0\n" |
: "=r" (sdr1) |
205,10 → 205,10 |
phte_t *phte_physical = (phte_t *) (sdr1 & 0xffff0000); |
/* Primary hash (xor) */ |
__u32 h = 0; |
__u32 hash = vsid ^ page; |
__u32 base = (hash & 0x3ff) << 3; |
__u32 i; |
uint32_t h = 0; |
uint32_t hash = vsid ^ page; |
uint32_t base = (hash & 0x3ff) << 3; |
uint32_t i; |
bool found = false; |
/* Find unused or colliding |
222,7 → 222,7 |
if (!found) { |
/* Secondary hash (not) */ |
__u32 base2 = (~hash & 0x3ff) << 3; |
uint32_t base2 = (~hash & 0x3ff) << 3; |
/* Find unused or colliding |
PTE in PTEG */ |
260,7 → 260,7 |
*/ |
void pht_refill(int n, istate_t *istate) |
{ |
__address badvaddr; |
uintptr_t badvaddr; |
pte_t *pte; |
int pfrc; |
as_t *as; |
322,7 → 322,7 |
*/ |
bool pht_real_refill(int n, istate_t *istate) |
{ |
__address badvaddr; |
uintptr_t badvaddr; |
if (n == VECTOR_DATA_STORAGE) { |
asm volatile ( |
332,7 → 332,7 |
} else |
badvaddr = istate->pc; |
__u32 physmem; |
uint32_t physmem; |
asm volatile ( |
"mfsprg3 %0\n" |
: "=r" (physmem) |
364,7 → 364,7 |
void tlb_invalidate_asid(asid_t asid) |
{ |
__u32 sdr1; |
uint32_t sdr1; |
asm volatile ( |
"mfsdr1 %0\n" |
: "=r" (sdr1) |
371,7 → 371,7 |
); |
phte_t *phte = (phte_t *) PA2KA(sdr1 & 0xffff0000); |
__u32 i; |
uint32_t i; |
for (i = 0; i < 8192; i++) { |
if ((phte[i].v) && (phte[i].vsid >= (asid << 4)) && (phte[i].vsid < ((asid << 4) + 16))) |
phte[i].v = 0; |
380,7 → 380,7 |
} |
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt) |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
// TODO |
tlb_invalidate_all(); |
395,7 → 395,7 |
); \ |
mask = (upper & 0x1ffc) >> 2; \ |
if (upper & 3) { \ |
__u32 tmp = mask; \ |
uint32_t tmp = mask; \ |
length = 128; \ |
while (tmp) { \ |
if ((tmp & 1) == 0) { \ |
412,10 → 412,10 |
void tlb_print(void) |
{ |
__u32 sr; |
uint32_t sr; |
for (sr = 0; sr < 16; sr++) { |
__u32 vsid; |
uint32_t vsid; |
asm volatile ( |
"mfsrin %0, %1\n" |
: "=r" (vsid) |
424,10 → 424,10 |
printf("vsid[%d]: VSID=%.*p (ASID=%d)%s%s\n", sr, sizeof(vsid) * 2, vsid & 0xffffff, (vsid & 0xffffff) >> 4, ((vsid >> 30) & 1) ? " supervisor" : "", ((vsid >> 29) & 1) ? " user" : ""); |
} |
__u32 upper; |
__u32 lower; |
__u32 mask; |
__u32 length; |
uint32_t upper; |
uint32_t lower; |
uint32_t mask; |
uint32_t length; |
PRINT_BAT("ibat[0]", 528, 529); |
PRINT_BAT("ibat[1]", 530, 531); |
/kernel/trunk/arch/ppc32/src/mm/as.c |
---|
55,7 → 55,7 |
{ |
asid_t asid; |
ipl_t ipl; |
__u32 sr; |
uint32_t sr; |
ipl = interrupts_disable(); |
spinlock_lock(&as->lock); |
/kernel/trunk/arch/ppc32/src/mm/frame.c |
---|
39,7 → 39,7 |
#include <align.h> |
#include <macros.h> |
__address last_frame = 0; |
uintptr_t last_frame = 0; |
void frame_arch_init(void) |
{ |
67,7 → 67,7 |
frame_mark_unavailable(0, 8); |
/* Mark the Page Hash Table frames as unavailable */ |
__u32 sdr1; |
uint32_t sdr1; |
asm volatile ( |
"mfsdr1 %0\n" |
: "=r" (sdr1) |
/kernel/trunk/arch/ppc32/src/mm/page.c |
---|
45,12 → 45,12 |
} |
__address hw_map(__address physaddr, size_t size) |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) |
panic("Unable to map physical memory %p (%d bytes)", physaddr, size) |
__address virtaddr = PA2KA(last_frame); |
uintptr_t virtaddr = PA2KA(last_frame); |
pfn_t i; |
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) |
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE); |
/kernel/trunk/arch/ppc32/src/interrupt.c |
---|
88,7 → 88,7 |
/* Reregister irq to be IPC-ready */ |
void irq_ipc_bind_arch(__native irq) |
void irq_ipc_bind_arch(unative_t irq) |
{ |
int_register(irq, "ipc_int", ipc_int); |
} |
/kernel/trunk/arch/ppc32/src/drivers/pic.c |
---|
38,11 → 38,11 |
#include <byteorder.h> |
#include <bitops.h> |
static volatile __u32 *pic; |
static volatile uint32_t *pic; |
void pic_init(__address base, size_t size) |
void pic_init(uintptr_t base, size_t size) |
{ |
pic = (__u32 *) hw_map(base, size); |
pic = (uint32_t *) hw_map(base, size); |
} |
/kernel/trunk/arch/ppc32/src/drivers/cuda.c |
---|
59,7 → 59,7 |
#define TIP 0x20 |
static volatile __u8 *cuda = NULL; |
static volatile uint8_t *cuda = NULL; |
static iroutine vector; |
190,10 → 190,10 |
}; |
void send_packet(const __u8 kind, index_t count, ...); |
void send_packet(const uint8_t kind, index_t count, ...); |
static void receive_packet(__u8 *kind, index_t count, __u8 data[]) |
static void receive_packet(uint8_t *kind, index_t count, uint8_t data[]) |
{ |
cuda[B] = cuda[B] & ~TIP; |
*kind = cuda[SR]; |
237,8 → 237,8 |
int cuda_get_scancode(void) |
{ |
__u8 kind; |
__u8 data[4]; |
uint8_t kind; |
uint8_t data[4]; |
receive_packet(&kind, 4, data); |
253,7 → 253,7 |
int scan_code = cuda_get_scancode(); |
if (scan_code != -1) { |
__u8 scancode = (__u8) scan_code; |
uint8_t scancode = (uint8_t) scan_code; |
if ((scancode & 0x80) != 0x80) |
chardev_push_character(&kbrd, lchars[scancode & 0x7f]); |
} |
275,9 → 275,9 |
} |
void cuda_init(__address base, size_t size) |
void cuda_init(uintptr_t base, size_t size) |
{ |
cuda = (__u8 *) hw_map(base, size); |
cuda = (uint8_t *) hw_map(base, size); |
int_register(CUDA_IRQ, "cuda", cuda_irq); |
pic_enable_interrupt(CUDA_IRQ); |
290,7 → 290,7 |
} |
void send_packet(const __u8 kind, index_t count, ...) |
void send_packet(const uint8_t kind, index_t count, ...) |
{ |
index_t i; |
va_list va; |
/kernel/trunk/arch/amd64/include/interrupt.h |
---|
70,26 → 70,26 |
/** This is passed to interrupt handlers */ |
struct istate { |
__u64 rax; |
__u64 rbx; |
__u64 rcx; |
__u64 rdx; |
__u64 rsi; |
__u64 rdi; |
__u64 r8; |
__u64 r9; |
__u64 r10; |
__u64 r11; |
__u64 r12; |
__u64 r13; |
__u64 r14; |
__u64 r15; |
__u64 rbp; |
__u64 error_word; |
__u64 rip; |
__u64 cs; |
__u64 rflags; |
__u64 stack[]; /* Additional data on stack */ |
uint64_t rax; |
uint64_t rbx; |
uint64_t rcx; |
uint64_t rdx; |
uint64_t rsi; |
uint64_t rdi; |
uint64_t r8; |
uint64_t r9; |
uint64_t r10; |
uint64_t r11; |
uint64_t r12; |
uint64_t r13; |
uint64_t r14; |
uint64_t r15; |
uint64_t rbp; |
uint64_t error_word; |
uint64_t rip; |
uint64_t cs; |
uint64_t rflags; |
uint64_t stack[]; /* Additional data on stack */ |
}; |
/** Return true if exception happened while in userspace */ |
98,17 → 98,17 |
return !(istate->rip & 0x8000000000000000); |
} |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr) |
{ |
istate->rip = retaddr; |
} |
static inline __native istate_get_pc(istate_t *istate) |
static inline unative_t istate_get_pc(istate_t *istate) |
{ |
return istate->rip; |
} |
extern void (* disable_irqs_function)(__u16 irqmask); |
extern void (* enable_irqs_function)(__u16 irqmask); |
extern void (* disable_irqs_function)(uint16_t irqmask); |
extern void (* enable_irqs_function)(uint16_t irqmask); |
extern void (* eoi_function)(void); |
extern void print_info_errcode(int n, istate_t *istate); |
120,8 → 120,8 |
extern void syscall(int n, istate_t *istate); |
extern void tlb_shootdown_ipi(int n, istate_t *istate); |
extern void trap_virtual_enable_irqs(__u16 irqmask); |
extern void trap_virtual_disable_irqs(__u16 irqmask); |
extern void trap_virtual_enable_irqs(uint16_t irqmask); |
extern void trap_virtual_disable_irqs(uint16_t irqmask); |
extern void trap_virtual_eoi(void); |
/* AMD64 - specific page handler */ |
extern void ident_page_fault(int n, istate_t *istate); |
/kernel/trunk/arch/amd64/include/byteorder.h |
---|
36,8 → 36,8 |
#define __amd64_BYTEORDER_H__ |
/* AMD64 is little-endian */ |
#define __native_le2host(n) (n) |
#define __u64_le2host(n) (n) |
#define unative_t_le2host(n) (n) |
#define uint64_t_le2host(n) (n) |
#endif |
/kernel/trunk/arch/amd64/include/cpuid.h |
---|
47,18 → 47,18 |
#include <arch/types.h> |
struct cpu_info { |
__u32 cpuid_eax; |
__u32 cpuid_ebx; |
__u32 cpuid_ecx; |
__u32 cpuid_edx; |
uint32_t cpuid_eax; |
uint32_t cpuid_ebx; |
uint32_t cpuid_ecx; |
uint32_t cpuid_edx; |
} __attribute__ ((packed)); |
extern int has_cpuid(void); |
extern void cpuid(__u32 cmd, cpu_info_t *info); |
extern void cpuid(uint32_t cmd, cpu_info_t *info); |
extern __u64 rdtsc(void); |
extern uint64_t rdtsc(void); |
#endif /* __ASM__ */ |
#endif |
/kernel/trunk/arch/amd64/include/types.h |
---|
26,7 → 26,7 |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup amd64 |
/** @addtogroup amd64 |
* @{ |
*/ |
/** @file |
37,29 → 37,28 |
#define NULL 0 |
typedef signed char __s8; |
typedef signed short __s16; |
typedef signed int __s32; |
typedef signed long long __s64; |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed int int32_t; |
typedef signed long long int64_t; |
typedef unsigned char __u8; |
typedef unsigned short __u16; |
typedef unsigned int __u32; |
typedef unsigned long long __u64; |
typedef unsigned char uint8_t; |
typedef unsigned short uint16_t; |
typedef unsigned int uint32_t; |
typedef unsigned long long uint64_t; |
typedef __u64 __address; |
typedef __u64 pfn_t; |
typedef uint64_t uintptr_t; |
typedef uint64_t pfn_t; |
/* Flags of processor (return value of interrupts_disable()) */ |
typedef __u64 ipl_t; |
typedef uint64_t ipl_t; |
typedef __u64 __native; |
typedef __s64 __snative; |
typedef uint64_t unative_t; |
typedef int64_t native_t; |
typedef struct page_specifier pte_t; |
#endif |
/** @} |
/** @} |
*/ |
/kernel/trunk/arch/amd64/include/memstr.h |
---|
49,7 → 49,7 |
*/ |
static inline void * memcpy(void * dst, const void * src, size_t cnt) |
{ |
__native d0, d1, d2; |
unative_t d0, d1, d2; |
__asm__ __volatile__( |
"rep movsq\n\t" |
59,7 → 59,7 |
"rep movsb\n\t" |
"1:\n" |
: "=&c" (d0), "=&D" (d1), "=&S" (d2) |
: "0" ((__native)(cnt / 8)), "g" ((__native)cnt), "1" ((__native) dst), "2" ((__native) src) |
: "0" ((unative_t)(cnt / 8)), "g" ((unative_t)cnt), "1" ((unative_t) dst), "2" ((unative_t) src) |
: "memory"); |
return dst; |
79,8 → 79,8 |
*/ |
static inline int memcmp(const void * src, const void * dst, size_t cnt) |
{ |
__native d0, d1, d2; |
__native ret; |
unative_t d0, d1, d2; |
unative_t ret; |
__asm__ ( |
"repe cmpsb\n\t" |
89,7 → 89,7 |
"addq $1, %0\n\t" |
"1:\n" |
: "=a" (ret), "=%S" (d0), "=&D" (d1), "=&c" (d2) |
: "0" (0), "1" (src), "2" (dst), "3" ((__native)cnt) |
: "0" (0), "1" (src), "2" (dst), "3" ((unative_t)cnt) |
); |
return ret; |
104,14 → 104,14 |
* @param cnt Number of words |
* @param x Value to fill |
*/ |
static inline void memsetw(__address dst, size_t cnt, __u16 x) |
static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x) |
{ |
__native d0, d1; |
unative_t d0, d1; |
__asm__ __volatile__ ( |
"rep stosw\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" ((__native)cnt), "2" (x) |
: "0" (dst), "1" ((unative_t)cnt), "2" (x) |
: "memory" |
); |
126,14 → 126,14 |
* @param cnt Number of bytes |
* @param x Value to fill |
*/ |
static inline void memsetb(__address dst, size_t cnt, __u8 x) |
static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x) |
{ |
__native d0, d1; |
unative_t d0, d1; |
__asm__ __volatile__ ( |
"rep stosb\n\t" |
: "=&D" (d0), "=&c" (d1), "=a" (x) |
: "0" (dst), "1" ((__native)cnt), "2" (x) |
: "0" (dst), "1" ((unative_t)cnt), "2" (x) |
: "memory" |
); |
/kernel/trunk/arch/amd64/include/atomic.h |
---|
83,8 → 83,8 |
#define atomic_preinc(val) (atomic_postinc(val)+1) |
#define atomic_predec(val) (atomic_postdec(val)-1) |
static inline __u64 test_and_set(atomic_t *val) { |
__u64 v; |
static inline uint64_t test_and_set(atomic_t *val) { |
uint64_t v; |
__asm__ volatile ( |
"movq $1, %0\n" |
99,7 → 99,7 |
/** amd64 specific fast spinlock */ |
static inline void atomic_lock_arch(atomic_t *val) |
{ |
__u64 tmp; |
uint64_t tmp; |
preemption_disable(); |
__asm__ volatile ( |
/kernel/trunk/arch/amd64/include/pm.h |
---|
140,34 → 140,34 |
typedef struct idescriptor idescriptor_t; |
struct ptr_16_64 { |
__u16 limit; |
__u64 base; |
uint16_t limit; |
uint64_t base; |
} __attribute__ ((packed)); |
typedef struct ptr_16_64 ptr_16_64_t; |
struct ptr_16_32 { |
__u16 limit; |
__u32 base; |
uint16_t limit; |
uint32_t base; |
} __attribute__ ((packed)); |
typedef struct ptr_16_32 ptr_16_32_t; |
struct tss { |
__u32 reserve1; |
__u64 rsp0; |
__u64 rsp1; |
__u64 rsp2; |
__u64 reserve2; |
__u64 ist1; |
__u64 ist2; |
__u64 ist3; |
__u64 ist4; |
__u64 ist5; |
__u64 ist6; |
__u64 ist7; |
__u64 reserve3; |
__u16 reserve4; |
__u16 iomap_base; |
__u8 iomap[TSS_IOMAP_SIZE]; |
uint32_t reserve1; |
uint64_t rsp0; |
uint64_t rsp1; |
uint64_t rsp2; |
uint64_t reserve2; |
uint64_t ist1; |
uint64_t ist2; |
uint64_t ist3; |
uint64_t ist4; |
uint64_t ist5; |
uint64_t ist6; |
uint64_t ist7; |
uint64_t reserve3; |
uint16_t reserve4; |
uint16_t iomap_base; |
uint8_t iomap[TSS_IOMAP_SIZE]; |
} __attribute__ ((packed)); |
typedef struct tss tss_t; |
182,11 → 182,11 |
extern void pm_init(void); |
extern void gdt_tss_setbase(descriptor_t *d, __address base); |
extern void gdt_tss_setlimit(descriptor_t *d, __u32 limit); |
extern void gdt_tss_setbase(descriptor_t *d, uintptr_t base); |
extern void gdt_tss_setlimit(descriptor_t *d, uint32_t limit); |
extern void idt_init(void); |
extern void idt_setoffset(idescriptor_t *d, __address offset); |
extern void idt_setoffset(idescriptor_t *d, uintptr_t offset); |
extern void tss_initialize(tss_t *t); |
/kernel/trunk/arch/amd64/include/proc/thread.h |
---|
38,7 → 38,7 |
#include <arch/types.h> |
typedef struct { |
__native tls; |
unative_t tls; |
} thread_arch_t; |
#endif |
/kernel/trunk/arch/amd64/include/asm.h |
---|
39,8 → 39,8 |
#include <arch/types.h> |
#include <config.h> |
extern void asm_delay_loop(__u32 t); |
extern void asm_fake_loop(__u32 t); |
extern void asm_delay_loop(uint32_t t); |
extern void asm_fake_loop(uint32_t t); |
/** Return base address of current stack. |
* |
48,11 → 48,11 |
* The stack is assumed to be STACK_SIZE bytes long. |
* The stack must start on page boundary. |
*/ |
static inline __address get_stack_base(void) |
static inline uintptr_t get_stack_base(void) |
{ |
__address v; |
uintptr_t v; |
__asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((__u64)STACK_SIZE-1))); |
__asm__ volatile ("andq %%rsp, %0\n" : "=r" (v) : "0" (~((uint64_t)STACK_SIZE-1))); |
return v; |
} |
68,7 → 68,7 |
* @param port Port to read from |
* @return Value read |
*/ |
static inline __u8 inb(__u16 port) { __u8 val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; } |
/** Byte to port |
* |
77,7 → 77,7 |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outb(__u16 port, __u8 val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); } |
static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); } |
/** Swap Hidden part of GS register with visible one */ |
static inline void swapgs(void) { __asm__ volatile("swapgs"); } |
149,23 → 149,23 |
} |
/** Write to MSR */ |
static inline void write_msr(__u32 msr, __u64 value) |
static inline void write_msr(uint32_t msr, uint64_t value) |
{ |
__asm__ volatile ( |
"wrmsr;" : : "c" (msr), |
"a" ((__u32)(value)), |
"d" ((__u32)(value >> 32)) |
"a" ((uint32_t)(value)), |
"d" ((uint32_t)(value >> 32)) |
); |
} |
static inline __native read_msr(__u32 msr) |
static inline unative_t read_msr(uint32_t msr) |
{ |
__u32 ax, dx; |
uint32_t ax, dx; |
__asm__ volatile ( |
"rdmsr;" : "=a"(ax), "=d"(dx) : "c" (msr) |
); |
return ((__u64)dx << 32) | ax; |
return ((uint64_t)dx << 32) | ax; |
} |
187,9 → 187,9 |
); |
} |
static inline __address * get_ip() |
static inline uintptr_t * get_ip() |
{ |
__address *ip; |
uintptr_t *ip; |
__asm__ volatile ( |
"mov %%rip, %0" |
202,9 → 202,9 |
* |
* @param addr Address on a page whose TLB entry is to be invalidated. |
*/ |
static inline void invlpg(__address addr) |
static inline void invlpg(uintptr_t addr) |
{ |
__asm__ volatile ("invlpg %0\n" :: "m" (*((__native *)addr))); |
__asm__ volatile ("invlpg %0\n" :: "m" (*((unative_t *)addr))); |
} |
/** Load GDTR register from memory. |
238,19 → 238,19 |
* |
* @param sel Selector specifying descriptor of TSS segment. |
*/ |
static inline void tr_load(__u16 sel) |
static inline void tr_load(uint16_t sel) |
{ |
__asm__ volatile ("ltr %0" : : "r" (sel)); |
} |
#define GEN_READ_REG(reg) static inline __native read_ ##reg (void) \ |
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \ |
{ \ |
__native res; \ |
unative_t res; \ |
__asm__ volatile ("movq %%" #reg ", %0" : "=r" (res) ); \ |
return res; \ |
} |
#define GEN_WRITE_REG(reg) static inline void write_ ##reg (__native regn) \ |
#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \ |
{ \ |
__asm__ volatile ("movq %0, %%" #reg : : "r" (regn)); \ |
} |
/kernel/trunk/arch/amd64/include/faddr.h |
---|
26,7 → 26,7 |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup amd64 |
/** @addtogroup amd64 |
* @{ |
*/ |
/** @file |
37,10 → 37,9 |
#include <arch/types.h> |
#define FADDR(fptr) ((__address) (fptr)) |
#define FADDR(fptr) ((uintptr_t) (fptr)) |
#endif |
/** @} |
/** @} |
*/ |
/kernel/trunk/arch/amd64/include/mm/frame.h |
---|
44,7 → 44,7 |
#ifndef __ASM__ |
extern __address last_frame; |
extern uintptr_t last_frame; |
extern void frame_arch_init(void); |
#endif /* __ASM__ */ |
/kernel/trunk/arch/amd64/include/mm/page.h |
---|
26,7 → 26,7 |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup amd64mm |
/** @addtogroup amd64mm |
* @{ |
*/ |
/** @file |
60,7 → 60,7 |
#endif |
#ifndef __ASM__ |
static inline __address ka2pa(__address x) |
static inline uintptr_t ka2pa(uintptr_t x) |
{ |
if (x > 0xffffffff80000000) |
return x - 0xffffffff80000000; |
67,9 → 67,9 |
else |
return x - 0xffff800000000000; |
} |
# define KA2PA(x) ka2pa((__address)x) |
# define PA2KA_CODE(x) (((__address) (x)) + 0xffffffff80000000) |
# define PA2KA(x) (((__address) (x)) + 0xffff800000000000) |
# define KA2PA(x) ka2pa((uintptr_t)x) |
# define PA2KA_CODE(x) (((uintptr_t) (x)) + 0xffffffff80000000) |
# define PA2KA(x) (((uintptr_t) (x)) + 0xffff800000000000) |
#else |
# define KA2PA(x) ((x) - 0xffffffff80000000) |
# define PA2KA(x) ((x) + 0xffffffff80000000) |
85,12 → 85,12 |
#define PTL2_INDEX_ARCH(vaddr) (((vaddr)>>21)&0x1ff) |
#define PTL3_INDEX_ARCH(vaddr) (((vaddr)>>12)&0x1ff) |
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl0))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl0))[(i)].addr_32_51)<<32 ))) |
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl1))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl1))[(i)].addr_32_51)<<32 ))) |
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) ((pte_t *) ((((__u64) ((pte_t *)(ptl2))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl2))[(i)].addr_32_51)<<32 ))) |
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((__address *) ((((__u64) ((pte_t *)(ptl3))[(i)].addr_12_31)<<12) | (((__u64) ((pte_t *)(ptl3))[(i)].addr_32_51)<<32 ))) |
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *) ((((uint64_t) ((pte_t *)(ptl0))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl0))[(i)].addr_32_51)<<32 ))) |
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) ((pte_t *) ((((uint64_t) ((pte_t *)(ptl1))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl1))[(i)].addr_32_51)<<32 ))) |
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) ((pte_t *) ((((uint64_t) ((pte_t *)(ptl2))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl2))[(i)].addr_32_51)<<32 ))) |
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((uintptr_t *) ((((uint64_t) ((pte_t *)(ptl3))[(i)].addr_12_31)<<12) | (((uint64_t) ((pte_t *)(ptl3))[(i)].addr_32_51)<<32 ))) |
#define SET_PTL0_ADDRESS_ARCH(ptl0) (write_cr3((__address) (ptl0))) |
#define SET_PTL0_ADDRESS_ARCH(ptl0) (write_cr3((uintptr_t) (ptl0))) |
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) set_pt_addr((pte_t *)(ptl0), (index_t)(i), a) |
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) set_pt_addr((pte_t *)(ptl1), (index_t)(i), a) |
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a) set_pt_addr((pte_t *)(ptl2), (index_t)(i), a) |
106,9 → 106,9 |
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) set_pt_flags((pte_t *)(ptl2), (index_t)(i), (x)) |
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *)(ptl3), (index_t)(i), (x)) |
#define PTE_VALID_ARCH(p) (*((__u64 *) (p)) != 0) |
#define PTE_VALID_ARCH(p) (*((uint64_t *) (p)) != 0) |
#define PTE_PRESENT_ARCH(p) ((p)->present != 0) |
#define PTE_GET_FRAME_ARCH(p) ((((__address)(p)->addr_12_31)<<12) | ((__address)(p)->addr_32_51<<32)) |
#define PTE_GET_FRAME_ARCH(p) ((((uintptr_t)(p)->addr_12_31)<<12) | ((uintptr_t)(p)->addr_32_51<<32)) |
#define PTE_WRITABLE_ARCH(p) ((p)->writeable != 0) |
#define PTE_EXECUTABLE_ARCH(p) ((p)->no_execute == 0) |
164,7 → 164,7 |
); |
} |
static inline void set_pt_addr(pte_t *pt, index_t i, __address a) |
static inline void set_pt_addr(pte_t *pt, index_t i, uintptr_t a) |
{ |
pte_t *p = &pt[i]; |
197,6 → 197,5 |
#endif |
/** @} |
/** @} |
*/ |
/kernel/trunk/arch/amd64/include/context.h |
---|
50,16 → 50,16 |
* during function call |
*/ |
struct context { |
__address sp; |
__address pc; |
uintptr_t sp; |
uintptr_t pc; |
__u64 rbx; |
__u64 rbp; |
uint64_t rbx; |
uint64_t rbp; |
__u64 r12; |
__u64 r13; |
__u64 r14; |
__u64 r15; |
uint64_t r12; |
uint64_t r13; |
uint64_t r14; |
uint64_t r15; |
ipl_t ipl; |
} __attribute__ ((packed)); |
/kernel/trunk/arch/amd64/include/cpu.h |
---|
76,7 → 76,7 |
}; |
extern void set_efer_flag(int flag); |
extern __u64 read_efer_flag(void); |
extern uint64_t read_efer_flag(void); |
void cpu_setup_fpu(void); |
#endif /* __ASM__ */ |
/kernel/trunk/arch/amd64/src/cpu/cpu.c |
---|
124,7 → 124,7 |
void cpu_arch_init(void) |
{ |
CPU->arch.tss = tss_p; |
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss); |
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((uint8_t *) CPU->arch.tss); |
CPU->fpu_owner = NULL; |
} |
/kernel/trunk/arch/amd64/src/syscall.c |
---|
57,9 → 57,9 |
* +0(KDATA_DES), +8(UDATA_DES), +16(UTEXT_DES) |
*/ |
write_msr(AMD_MSR_STAR, |
((__u64)(gdtselector(KDATA_DES) | PL_USER)<<48) \ |
| ((__u64)(gdtselector(KTEXT_DES) | PL_KERNEL)<<32)); |
write_msr(AMD_MSR_LSTAR, (__u64)syscall_entry); |
((uint64_t)(gdtselector(KDATA_DES) | PL_USER)<<48) \ |
| ((uint64_t)(gdtselector(KTEXT_DES) | PL_KERNEL)<<32)); |
write_msr(AMD_MSR_LSTAR, (uint64_t)syscall_entry); |
/* Mask RFLAGS on syscall |
* - disable interrupts, until we exchange the stack register |
* (mask the IE bit) |
/kernel/trunk/arch/amd64/src/amd64.c |
---|
186,7 → 186,7 |
* The specs say, that on %fs:0 there is stored contents of %fs register, |
* we need not to go to CPL0 to read it. |
*/ |
__native sys_tls_set(__native addr) |
unative_t sys_tls_set(unative_t addr) |
{ |
THREAD->arch.tls = addr; |
write_msr(AMD_MSR_FS, addr); |
/kernel/trunk/arch/amd64/src/pm.c |
---|
123,13 → 123,13 |
idescriptor_t idt[IDT_ITEMS]; |
ptr_16_64_t gdtr = {.limit = sizeof(gdt), .base= (__u64) gdt }; |
ptr_16_64_t idtr = {.limit = sizeof(idt), .base= (__u64) idt }; |
ptr_16_64_t gdtr = {.limit = sizeof(gdt), .base= (uint64_t) gdt }; |
ptr_16_64_t idtr = {.limit = sizeof(idt), .base= (uint64_t) idt }; |
static tss_t tss; |
tss_t *tss_p = NULL; |
void gdt_tss_setbase(descriptor_t *d, __address base) |
void gdt_tss_setbase(descriptor_t *d, uintptr_t base) |
{ |
tss_descriptor_t *td = (tss_descriptor_t *) d; |
139,7 → 139,7 |
td->base_32_63 = ((base) >> 32); |
} |
void gdt_tss_setlimit(descriptor_t *d, __u32 limit) |
void gdt_tss_setlimit(descriptor_t *d, uint32_t limit) |
{ |
struct tss_descriptor *td = (tss_descriptor_t *) d; |
147,7 → 147,7 |
td->limit_16_19 = (limit >> 16) & 0xf; |
} |
void idt_setoffset(idescriptor_t *d, __address offset) |
void idt_setoffset(idescriptor_t *d, uintptr_t offset) |
{ |
/* |
* Offset is a linear address. |
159,7 → 159,7 |
void tss_initialize(tss_t *t) |
{ |
memsetb((__address) t, sizeof(tss_t), 0); |
memsetb((uintptr_t) t, sizeof(tss_t), 0); |
} |
/* |
179,7 → 179,7 |
d->present = 1; |
d->type = AR_INTERRUPT; /* masking interrupt */ |
idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size); |
idt_setoffset(d, ((uintptr_t) interrupt_handlers) + i*interrupt_handler_size); |
exc_register(i, "undef", (iroutine)null_interrupt); |
} |
214,7 → 214,7 |
/* We are going to use malloc, which may return |
* non boot-mapped pointer, initialize the CR3 register |
* ahead of page_init */ |
write_cr3((__address) AS_KERNEL->page_table); |
write_cr3((uintptr_t) AS_KERNEL->page_table); |
tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC); |
if (!tss_p) |
228,7 → 228,7 |
tss_desc->type = AR_TSS; |
tss_desc->dpl = PL_KERNEL; |
gdt_tss_setbase(&gdt_p[TSS_DES], (__address) tss_p); |
gdt_tss_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p); |
gdt_tss_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1); |
gdtr_load(&gdtr); |
/kernel/trunk/arch/amd64/src/ddi/ddi.c |
---|
55,7 → 55,7 |
* |
* @return 0 on success or an error code from errno.h. |
*/ |
int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size) |
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) |
{ |
count_t bits; |
65,13 → 65,13 |
if (task->arch.iomap.bits < bits) { |
bitmap_t oldiomap; |
__u8 *newmap; |
uint8_t *newmap; |
/* |
* The I/O permission bitmap is too small and needs to be grown. |
*/ |
newmap = (__u8 *) malloc(BITS2BYTES(bits), FRAME_ATOMIC); |
newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC); |
if (!newmap) |
return ENOMEM; |
/kernel/trunk/arch/amd64/src/proc/scheduler.c |
---|
56,12 → 56,12 |
/** Perform amd64 specific tasks needed before the new thread is scheduled. */ |
void before_thread_runs_arch(void) |
{ |
CPU->arch.tss->rsp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
CPU->arch.tss->rsp0 = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
/* Syscall support - write address of thread stack pointer to |
* hidden part of gs */ |
swapgs(); |
write_msr(AMD_MSR_GS, (__u64)&THREAD->kstack); |
write_msr(AMD_MSR_GS, (uint64_t)&THREAD->kstack); |
swapgs(); |
/* TLS support - set FS to thread local storage */ |
/kernel/trunk/arch/amd64/src/debugger.c |
---|
46,7 → 46,7 |
#include <smp/ipi.h> |
typedef struct { |
__address address; /**< Breakpoint address */ |
uintptr_t address; /**< Breakpoint address */ |
int flags; /**< Flags regarding breakpoint */ |
int counter; /**< How many times the exception occured */ |
} bpinfo_t; |
122,7 → 122,7 |
/* Setup DR register according to table */ |
static void setup_dr(int curidx) |
{ |
__native dr7; |
unative_t dr7; |
bpinfo_t *cur = &breakpoints[curidx]; |
int flags = breakpoints[curidx].flags; |
153,14 → 153,14 |
; |
} else { |
if (sizeof(int) == 4) |
dr7 |= ((__native) 0x3) << (18 + 4*curidx); |
dr7 |= ((unative_t) 0x3) << (18 + 4*curidx); |
else /* 8 */ |
dr7 |= ((__native) 0x2) << (18 + 4*curidx); |
dr7 |= ((unative_t) 0x2) << (18 + 4*curidx); |
if ((flags & BKPOINT_WRITE)) |
dr7 |= ((__native) 0x1) << (16 + 4*curidx); |
dr7 |= ((unative_t) 0x1) << (16 + 4*curidx); |
else if ((flags & BKPOINT_READ_WRITE)) |
dr7 |= ((__native) 0x3) << (16 + 4*curidx); |
dr7 |= ((unative_t) 0x3) << (16 + 4*curidx); |
} |
/* Enable global breakpoint */ |
205,7 → 205,7 |
} |
cur = &breakpoints[curidx]; |
cur->address = (__address) where; |
cur->address = (uintptr_t) where; |
cur->flags = flags; |
cur->counter = 0; |
235,13 → 235,13 |
/* Handle zero checker */ |
if (! (breakpoints[slot].flags & BKPOINT_INSTR)) { |
if ((breakpoints[slot].flags & BKPOINT_CHECK_ZERO)) { |
if (*((__native *) breakpoints[slot].address) != 0) |
if (*((unative_t *) breakpoints[slot].address) != 0) |
return; |
printf("**** Found ZERO on address %p ****\n", |
slot, breakpoints[slot].address); |
} else { |
printf("Data watchpoint - new data: %p\n", |
*((__native *) breakpoints[slot].address)); |
*((unative_t *) breakpoints[slot].address)); |
} |
} |
printf("Reached breakpoint %d:%p(%s)\n", slot, getip(istate), |
315,7 → 315,7 |
static void debug_exception(int n, istate_t *istate) |
{ |
__native dr6; |
unative_t dr6; |
int i; |
/* Set RF to restart the instruction */ |
/kernel/trunk/arch/amd64/src/mm/memory_init.c |
---|
37,9 → 37,9 |
#include <arch/mm/page.h> |
#include <print.h> |
__u8 e820counter = 0xff; |
uint8_t e820counter = 0xff; |
struct e820memmap_ e820table[MEMMAP_E820_MAX_RECORDS]; |
__u32 e801memorysize; |
uint32_t e801memorysize; |
size_t get_memory_size(void) |
{ |
48,7 → 48,7 |
void memory_print_map(void) |
{ |
__u8 i; |
uint8_t i; |
for (i=0;i<e820counter;i++) { |
printf("E820 base: %#llx size: %#llx type: ", e820table[i].base_address, e820table[i].size); |
/kernel/trunk/arch/amd64/src/mm/page.c |
---|
62,19 → 62,19 |
#define PTL3_ADDR(ptl2, page) ((pte_t *)PA2KA(GET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page)))) |
#define SETUP_PTL1(ptl0, page, tgt) { \ |
SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (__address)KA2PA(tgt)); \ |
SET_PTL1_ADDRESS_ARCH(ptl0, PTL0_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \ |
SET_PTL1_FLAGS_ARCH(ptl0, PTL0_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \ |
} |
#define SETUP_PTL2(ptl1, page, tgt) { \ |
SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (__address)KA2PA(tgt)); \ |
SET_PTL2_ADDRESS_ARCH(ptl1, PTL1_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \ |
SET_PTL2_FLAGS_ARCH(ptl1, PTL1_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \ |
} |
#define SETUP_PTL3(ptl2, page, tgt) { \ |
SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (__address)KA2PA(tgt)); \ |
SET_PTL3_ADDRESS_ARCH(ptl2, PTL2_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \ |
SET_PTL3_FLAGS_ARCH(ptl2, PTL2_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \ |
} |
#define SETUP_FRAME(ptl3, page, tgt) { \ |
SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (__address)KA2PA(tgt)); \ |
SET_FRAME_ADDRESS_ARCH(ptl3, PTL3_INDEX_ARCH(page), (uintptr_t)KA2PA(tgt)); \ |
SET_FRAME_FLAGS_ARCH(ptl3, PTL3_INDEX_ARCH(page), PAGE_WRITE | PAGE_EXEC); \ |
} |
81,7 → 81,7 |
void page_arch_init(void) |
{ |
__address cur; |
uintptr_t cur; |
int i; |
int identity_flags = PAGE_CACHEABLE | PAGE_EXEC | PAGE_GLOBAL; |
109,10 → 109,10 |
} |
exc_register(14, "page_fault", (iroutine)page_fault); |
write_cr3((__address) AS_KERNEL->page_table); |
write_cr3((uintptr_t) AS_KERNEL->page_table); |
} |
else { |
write_cr3((__address) AS_KERNEL->page_table); |
write_cr3((uintptr_t) AS_KERNEL->page_table); |
} |
} |
125,8 → 125,8 |
*/ |
void ident_page_fault(int n, istate_t *istate) |
{ |
__address page; |
static __address oldpage = 0; |
uintptr_t page; |
static uintptr_t oldpage = 0; |
pte_t *aptl_1, *aptl_2, *aptl_3; |
page = read_cr2(); |
173,7 → 173,7 |
void page_fault(int n, istate_t *istate) |
{ |
__address page; |
uintptr_t page; |
pf_access_t access; |
page = read_cr2(); |
198,12 → 198,12 |
} |
__address hw_map(__address physaddr, size_t size) |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) |
panic("Unable to map physical memory %p (%d bytes)", physaddr, size) |
__address virtaddr = PA2KA(last_frame); |
uintptr_t virtaddr = PA2KA(last_frame); |
pfn_t i; |
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) |
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE); |
/kernel/trunk/arch/amd64/src/interrupt.c |
---|
56,7 → 56,7 |
void print_info_errcode(int n, istate_t *istate) |
{ |
char *symbol; |
/* __u64 *x = &istate->stack[0]; */ |
/* uint64_t *x = &istate->stack[0]; */ |
if (!(symbol=get_symtab_entry(istate->rip))) |
symbol = ""; |
79,8 → 79,8 |
* Interrupt and exception dispatching. |
*/ |
void (* disable_irqs_function)(__u16 irqmask) = NULL; |
void (* enable_irqs_function)(__u16 irqmask) = NULL; |
void (* disable_irqs_function)(uint16_t irqmask) = NULL; |
void (* enable_irqs_function)(uint16_t irqmask) = NULL; |
void (* eoi_function)(void) = NULL; |
void null_interrupt(int n, istate_t *istate) |
141,7 → 141,7 |
tlb_shootdown_ipi_recv(); |
} |
void trap_virtual_enable_irqs(__u16 irqmask) |
void trap_virtual_enable_irqs(uint16_t irqmask) |
{ |
if (enable_irqs_function) |
enable_irqs_function(irqmask); |
149,7 → 149,7 |
panic("no enable_irqs_function\n"); |
} |
void trap_virtual_disable_irqs(__u16 irqmask) |
void trap_virtual_disable_irqs(uint16_t irqmask) |
{ |
if (disable_irqs_function) |
disable_irqs_function(irqmask); |
174,7 → 174,7 |
/* Reregister irq to be IPC-ready */ |
void irq_ipc_bind_arch(__native irq) |
void irq_ipc_bind_arch(unative_t irq) |
{ |
if (irq == IRQ_CLK) |
return; |
/kernel/trunk/arch/ppc64/include/exception.h |
---|
42,47 → 42,47 |
#include <typedefs.h> |
struct istate { |
__u64 r0; |
__u64 r2; |
__u64 r3; |
__u64 r4; |
__u64 r5; |
__u64 r6; |
__u64 r7; |
__u64 r8; |
__u64 r9; |
__u64 r10; |
__u64 r11; |
__u64 r13; |
__u64 r14; |
__u64 r15; |
__u64 r16; |
__u64 r17; |
__u64 r18; |
__u64 r19; |
__u64 r20; |
__u64 r21; |
__u64 r22; |
__u64 r23; |
__u64 r24; |
__u64 r25; |
__u64 r26; |
__u64 r27; |
__u64 r28; |
__u64 r29; |
__u64 r30; |
__u64 r31; |
__u64 cr; |
__u64 pc; |
__u64 srr1; |
__u64 lr; |
__u64 ctr; |
__u64 xer; |
__u64 r12; |
__u64 sp; |
uint64_t r0; |
uint64_t r2; |
uint64_t r3; |
uint64_t r4; |
uint64_t r5; |
uint64_t r6; |
uint64_t r7; |
uint64_t r8; |
uint64_t r9; |
uint64_t r10; |
uint64_t r11; |
uint64_t r13; |
uint64_t r14; |
uint64_t r15; |
uint64_t r16; |
uint64_t r17; |
uint64_t r18; |
uint64_t r19; |
uint64_t r20; |
uint64_t r21; |
uint64_t r22; |
uint64_t r23; |
uint64_t r24; |
uint64_t r25; |
uint64_t r26; |
uint64_t r27; |
uint64_t r28; |
uint64_t r29; |
uint64_t r30; |
uint64_t r31; |
uint64_t cr; |
uint64_t pc; |
uint64_t srr1; |
uint64_t lr; |
uint64_t ctr; |
uint64_t xer; |
uint64_t r12; |
uint64_t sp; |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr) |
{ |
istate->pc = retaddr; |
} |
93,7 → 93,7 |
panic("istate_from_uspace not yet implemented"); |
return 0; |
} |
static inline __native istate_get_pc(istate_t *istate) |
static inline unative_t istate_get_pc(istate_t *istate) |
{ |
return istate->pc; |
} |
/kernel/trunk/arch/ppc64/include/fpu_context.h |
---|
40,25 → 40,25 |
#endif |
struct fpu_context { |
__u64 fr14; |
__u64 fr15; |
__u64 fr16; |
__u64 fr17; |
__u64 fr18; |
__u64 fr19; |
__u64 fr20; |
__u64 fr21; |
__u64 fr22; |
__u64 fr23; |
__u64 fr24; |
__u64 fr25; |
__u64 fr26; |
__u64 fr27; |
__u64 fr28; |
__u64 fr29; |
__u64 fr30; |
__u64 fr31; |
__u32 fpscr; |
uint64_t fr14; |
uint64_t fr15; |
uint64_t fr16; |
uint64_t fr17; |
uint64_t fr18; |
uint64_t fr19; |
uint64_t fr20; |
uint64_t fr21; |
uint64_t fr22; |
uint64_t fr23; |
uint64_t fr24; |
uint64_t fr25; |
uint64_t fr26; |
uint64_t fr27; |
uint64_t fr28; |
uint64_t fr29; |
uint64_t fr30; |
uint64_t fr31; |
uint32_t fpscr; |
} __attribute__ ((packed)); |
#endif |
/kernel/trunk/arch/ppc64/include/byteorder.h |
---|
40,24 → 40,24 |
#define BIG_ENDIAN |
static inline __u64 __u64_le2host(__u64 n) |
static inline uint64_t uint64_t_le2host(uint64_t n) |
{ |
return __u64_byteorder_swap(n); |
return uint64_t_byteorder_swap(n); |
} |
/** Convert little-endian __native to host __native |
/** Convert little-endian unative_t to host unative_t |
* |
* Convert little-endian __native parameter to host endianess. |
* Convert little-endian unative_t parameter to host endianess. |
* |
* @param n Little-endian __native argument. |
* @param n Little-endian unative_t argument. |
* |
* @return Result in host endianess. |
* |
*/ |
static inline __native __native_le2host(__native n) |
static inline unative_t unative_t_le2host(unative_t n) |
{ |
__address v; |
uintptr_t v; |
asm volatile ( |
"lwbrx %0, %1, %2\n" |
/kernel/trunk/arch/ppc64/include/cpuid.h |
---|
38,8 → 38,8 |
#include <arch/types.h> |
struct cpu_info { |
__u16 version; |
__u16 revision; |
uint16_t version; |
uint16_t revision; |
} __attribute__ ((packed)); |
static inline void cpu_version(struct cpu_info *info) |
/kernel/trunk/arch/ppc64/include/types.h |
---|
37,22 → 37,22 |
#define NULL 0 |
typedef signed char __s8; |
typedef signed short __s16; |
typedef signed int __s32; |
typedef signed long __s64; |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed int int32_t; |
typedef signed long int64_t; |
typedef unsigned char __u8; |
typedef unsigned short __u16; |
typedef unsigned int __u32; |
typedef unsigned long __u64; |
typedef unsigned char uint8_t; |
typedef unsigned short uint16_t; |
typedef unsigned int uint32_t; |
typedef unsigned long uint64_t; |
typedef __u64 __address; |
typedef __u64 pfn_t; |
typedef uint64_t uintptr_t; |
typedef uint64_t pfn_t; |
typedef __u64 ipl_t; |
typedef uint64_t ipl_t; |
typedef __u64 __native; |
typedef uint64_t unative_t; |
/** Page Table Entry. */ |
typedef struct { |
/kernel/trunk/arch/ppc64/include/memstr.h |
---|
37,10 → 37,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(__address dst, size_t cnt, __u16 x); |
extern void memsetb(__address dst, size_t cnt, __u8 x); |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern int memcmp(__address src, __address dst, int cnt); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
#endif |
/kernel/trunk/arch/ppc64/include/boot/boot.h |
---|
48,28 → 48,28 |
#include <arch/types.h> |
typedef struct { |
__address addr; |
__u64 size; |
uintptr_t addr; |
uint64_t size; |
} utask_t; |
typedef struct { |
__u32 count; |
uint32_t count; |
utask_t tasks[TASKMAP_MAX_RECORDS]; |
} taskmap_t; |
typedef struct { |
__address start; |
__u64 size; |
uintptr_t start; |
uint64_t size; |
} memzone_t; |
typedef struct { |
__u64 total; |
__u32 count; |
uint64_t total; |
uint32_t count; |
memzone_t zones[MEMMAP_MAX_RECORDS]; |
} memmap_t; |
typedef struct { |
__address addr; |
uintptr_t addr; |
unsigned int width; |
unsigned int height; |
unsigned int bpp; |
/kernel/trunk/arch/ppc64/include/faddr.h |
---|
37,7 → 37,7 |
#include <arch/types.h> |
#define FADDR(fptr) ((__address) (fptr)) |
#define FADDR(fptr) ((uintptr_t) (fptr)) |
#endif |
/kernel/trunk/arch/ppc64/include/asm.h |
---|
128,9 → 128,9 |
* The stack is assumed to be STACK_SIZE bytes long. |
* The stack must start on page boundary. |
*/ |
static inline __address get_stack_base(void) |
static inline uintptr_t get_stack_base(void) |
{ |
__address v; |
uintptr_t v; |
asm volatile ( |
"and %0, %%sp, %1\n" |
151,9 → 151,9 |
); |
} |
void asm_delay_loop(__u32 t); |
void asm_delay_loop(uint32_t t); |
extern void userspace_asm(__address uspace_uarg, __address stack, __address entry); |
extern void userspace_asm(uintptr_t uspace_uarg, uintptr_t stack, uintptr_t entry); |
#endif |
/kernel/trunk/arch/ppc64/include/mm/frame.h |
---|
43,7 → 43,7 |
#include <arch/types.h> |
extern __address last_frame; |
extern uintptr_t last_frame; |
extern void frame_arch_init(void); |
/kernel/trunk/arch/ppc64/include/mm/page.h |
---|
43,8 → 43,8 |
#ifdef KERNEL |
#ifndef __ASM__ |
# define KA2PA(x) (((__address) (x)) - 0x80000000) |
# define PA2KA(x) (((__address) (x)) + 0x80000000) |
# define KA2PA(x) (((uintptr_t) (x)) - 0x80000000) |
# define PA2KA(x) (((uintptr_t) (x)) + 0x80000000) |
#else |
# define KA2PA(x) ((x) - 0x80000000) |
# define PA2KA(x) ((x) + 0x80000000) |
94,9 → 94,9 |
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) |
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *) (ptl3), (index_t) (i), (x)) |
#define PTE_VALID_ARCH(pte) (*((__u32 *) (pte)) != 0) |
#define PTE_VALID_ARCH(pte) (*((uint32_t *) (pte)) != 0) |
#define PTE_PRESENT_ARCH(pte) ((pte)->p != 0) |
#define PTE_GET_FRAME_ARCH(pte) ((__address) ((pte)->pfn << 12)) |
#define PTE_GET_FRAME_ARCH(pte) ((uintptr_t) ((pte)->pfn << 12)) |
#define PTE_WRITABLE_ARCH(pte) 1 |
#define PTE_EXECUTABLE_ARCH(pte) 1 |
/kernel/trunk/arch/ppc64/include/context.h |
---|
42,31 → 42,31 |
#define SP_DELTA 16 |
struct context { |
__address sp; |
__address pc; |
uintptr_t sp; |
uintptr_t pc; |
__u64 r2; |
__u64 r13; |
__u64 r14; |
__u64 r15; |
__u64 r16; |
__u64 r17; |
__u64 r18; |
__u64 r19; |
__u64 r20; |
__u64 r21; |
__u64 r22; |
__u64 r23; |
__u64 r24; |
__u64 r25; |
__u64 r26; |
__u64 r27; |
__u64 r28; |
__u64 r29; |
__u64 r30; |
__u64 r31; |
uint64_t r2; |
uint64_t r13; |
uint64_t r14; |
uint64_t r15; |
uint64_t r16; |
uint64_t r17; |
uint64_t r18; |
uint64_t r19; |
uint64_t r20; |
uint64_t r21; |
uint64_t r22; |
uint64_t r23; |
uint64_t r24; |
uint64_t r25; |
uint64_t r26; |
uint64_t r27; |
uint64_t r28; |
uint64_t r29; |
uint64_t r30; |
uint64_t r31; |
__u64 cr; |
uint64_t cr; |
ipl_t ipl; |
} __attribute__ ((packed)); |
/kernel/trunk/arch/ppc64/src/ddi/ddi.c |
---|
47,7 → 47,7 |
* |
* @return 0 on success or an error code from errno.h. |
*/ |
int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size) |
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) |
{ |
return 0; |
} |
/kernel/trunk/arch/ppc64/src/mm/tlb.c |
---|
70,7 → 70,7 |
* @param page Address of the first page whose entry is to be invalidated. |
* @param cnt Number of entries to invalidate. |
*/ |
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt) |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
tlb_invalidate_all(); |
} |
/kernel/trunk/arch/ppc64/src/mm/frame.c |
---|
39,7 → 39,7 |
#include <align.h> |
#include <macros.h> |
__address last_frame = 0; |
uintptr_t last_frame = 0; |
void frame_arch_init(void) |
{ |
/kernel/trunk/arch/ppc64/src/mm/page.c |
---|
65,7 → 65,7 |
* @return PTE on success, NULL otherwise. |
* |
*/ |
static pte_t *find_mapping_and_check(as_t *as, bool lock, __address badvaddr, int access, |
static pte_t *find_mapping_and_check(as_t *as, bool lock, uintptr_t badvaddr, int access, |
istate_t *istate, int *pfrc) |
{ |
/* |
113,7 → 113,7 |
} |
static void pht_refill_fail(__address badvaddr, istate_t *istate) |
static void pht_refill_fail(uintptr_t badvaddr, istate_t *istate) |
{ |
char *symbol = ""; |
char *sym2 = ""; |
128,11 → 128,11 |
} |
static void pht_insert(const __address vaddr, const pfn_t pfn) |
static void pht_insert(const uintptr_t vaddr, const pfn_t pfn) |
{ |
__u32 page = (vaddr >> 12) & 0xffff; |
__u32 api = (vaddr >> 22) & 0x3f; |
__u32 vsid; |
uint32_t page = (vaddr >> 12) & 0xffff; |
uint32_t api = (vaddr >> 22) & 0x3f; |
uint32_t vsid; |
asm volatile ( |
"mfsrin %0, %1\n" |
141,10 → 141,10 |
); |
/* Primary hash (xor) */ |
__u32 h = 0; |
__u32 hash = vsid ^ page; |
__u32 base = (hash & 0x3ff) << 3; |
__u32 i; |
uint32_t h = 0; |
uint32_t hash = vsid ^ page; |
uint32_t base = (hash & 0x3ff) << 3; |
uint32_t i; |
bool found = false; |
/* Find unused or colliding |
158,7 → 158,7 |
if (!found) { |
/* Secondary hash (not) */ |
__u32 base2 = (~hash & 0x3ff) << 3; |
uint32_t base2 = (~hash & 0x3ff) << 3; |
/* Find unused or colliding |
PTE in PTEG */ |
196,7 → 196,7 |
*/ |
void pht_refill(bool data, istate_t *istate) |
{ |
__address badvaddr; |
uintptr_t badvaddr; |
pte_t *pte; |
int pfrc; |
as_t *as; |
252,7 → 252,7 |
void pht_init(void) |
{ |
memsetb((__address) phte, 1 << PHT_BITS, 0); |
memsetb((uintptr_t) phte, 1 << PHT_BITS, 0); |
} |
261,7 → 261,7 |
if (config.cpu_active == 1) { |
page_mapping_operations = &pt_mapping_operations; |
__address cur; |
uintptr_t cur; |
int flags; |
/* Frames below 128 MB are mapped using BAT, |
276,24 → 276,24 |
/* Allocate page hash table */ |
phte_t *physical_phte = (phte_t *) frame_alloc(PHT_ORDER, FRAME_KA | FRAME_ATOMIC); |
ASSERT((__address) physical_phte % (1 << PHT_BITS) == 0); |
ASSERT((uintptr_t) physical_phte % (1 << PHT_BITS) == 0); |
pht_init(); |
asm volatile ( |
"mtsdr1 %0\n" |
: |
: "r" ((__address) physical_phte) |
: "r" ((uintptr_t) physical_phte) |
); |
} |
} |
__address hw_map(__address physaddr, size_t size) |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) |
panic("Unable to map physical memory %p (%d bytes)", physaddr, size) |
__address virtaddr = PA2KA(last_frame); |
uintptr_t virtaddr = PA2KA(last_frame); |
pfn_t i; |
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) |
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE); |
/kernel/trunk/arch/ppc64/src/interrupt.c |
---|
65,7 → 65,7 |
/* Reregister irq to be IPC-ready */ |
void irq_ipc_bind_arch(__native irq) |
void irq_ipc_bind_arch(unative_t irq) |
{ |
panic("not implemented\n"); |
/* TODO */ |
/kernel/trunk/arch/ppc64/src/ppc64.c |
---|
48,7 → 48,7 |
/* Setup usermode */ |
init.cnt = bootinfo.taskmap.count; |
__u32 i; |
uint32_t i; |
for (i = 0; i < bootinfo.taskmap.count; i++) { |
init.tasks[i].addr = PA2KA(bootinfo.taskmap.tasks[i].addr); |
90,7 → 90,7 |
void userspace(uspace_arg_t *kernel_uarg) |
{ |
userspace_asm((__address) kernel_uarg->uspace_uarg, (__address) kernel_uarg->uspace_stack + THREAD_STACK_SIZE - SP_DELTA, (__address) kernel_uarg->uspace_entry); |
userspace_asm((uintptr_t) kernel_uarg->uspace_uarg, (uintptr_t) kernel_uarg->uspace_stack + THREAD_STACK_SIZE - SP_DELTA, (uintptr_t) kernel_uarg->uspace_entry); |
/* Unreachable */ |
for (;;) |
/kernel/trunk/arch/mips32/include/exception.h |
---|
62,45 → 62,45 |
#define EXC_VCED 31 |
struct istate { |
__u32 at; |
__u32 v0; |
__u32 v1; |
__u32 a0; |
__u32 a1; |
__u32 a2; |
__u32 a3; |
__u32 t0; |
__u32 t1; |
__u32 t2; |
__u32 t3; |
__u32 t4; |
__u32 t5; |
__u32 t6; |
__u32 t7; |
__u32 s0; |
__u32 s1; |
__u32 s2; |
__u32 s3; |
__u32 s4; |
__u32 s5; |
__u32 s6; |
__u32 s7; |
__u32 t8; |
__u32 t9; |
__u32 gp; |
__u32 sp; |
__u32 s8; |
__u32 ra; |
uint32_t at; |
uint32_t v0; |
uint32_t v1; |
uint32_t a0; |
uint32_t a1; |
uint32_t a2; |
uint32_t a3; |
uint32_t t0; |
uint32_t t1; |
uint32_t t2; |
uint32_t t3; |
uint32_t t4; |
uint32_t t5; |
uint32_t t6; |
uint32_t t7; |
uint32_t s0; |
uint32_t s1; |
uint32_t s2; |
uint32_t s3; |
uint32_t s4; |
uint32_t s5; |
uint32_t s6; |
uint32_t s7; |
uint32_t t8; |
uint32_t t9; |
uint32_t gp; |
uint32_t sp; |
uint32_t s8; |
uint32_t ra; |
__u32 lo; |
__u32 hi; |
uint32_t lo; |
uint32_t hi; |
__u32 status; /* cp0_status */ |
__u32 epc; /* cp0_epc */ |
__u32 k1; /* We use it as thread-local pointer */ |
uint32_t status; /* cp0_status */ |
uint32_t epc; /* cp0_epc */ |
uint32_t k1; /* We use it as thread-local pointer */ |
}; |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr) |
{ |
istate->epc = retaddr; |
} |
110,7 → 110,7 |
{ |
return istate->status & cp0_status_um_bit; |
} |
static inline __native istate_get_pc(istate_t *istate) |
static inline unative_t istate_get_pc(istate_t *istate) |
{ |
return istate->epc; |
} |
/kernel/trunk/arch/mips32/include/fpu_context.h |
---|
37,11 → 37,11 |
#include <arch/types.h> |
#define FPU_CONTEXT_ALIGN sizeof(__native) |
#define FPU_CONTEXT_ALIGN sizeof(unative_t) |
struct fpu_context { |
__native dregs[32]; |
__native cregs[32]; |
unative_t dregs[32]; |
unative_t cregs[32]; |
}; |
#endif |
/kernel/trunk/arch/mips32/include/byteorder.h |
---|
39,19 → 39,19 |
#include <byteorder.h> |
#ifdef BIG_ENDIAN |
static inline __u64 __u64_le2host(__u64 n) |
static inline uint64_t uint64_t_le2host(uint64_t n) |
{ |
return __u64_byteorder_swap(n); |
return uint64_t_byteorder_swap(n); |
} |
static inline __native __native_le2host(__native n) |
static inline unative_t unative_t_le2host(unative_t n) |
{ |
return __u32_byteorder_swap(n); |
return uint32_t_byteorder_swap(n); |
} |
#else |
# define __native_le2host(n) (n) |
# define __u64_le2host(n) (n) |
# define unative_t_le2host(n) (n) |
# define uint64_t_le2host(n) (n) |
#endif |
#endif |
/kernel/trunk/arch/mips32/include/boot.h |
---|
34,12 → 34,12 |
#include <arch/types.h> |
typedef struct { |
__address addr; |
__u32 size; |
uintptr_t addr; |
uint32_t size; |
} utask_t; |
typedef struct { |
__u32 cnt; |
uint32_t cnt; |
utask_t tasks[TASKMAP_MAX_RECORDS]; |
} bootinfo_t; |
/kernel/trunk/arch/mips32/include/types.h |
---|
37,28 → 37,28 |
#define NULL 0 |
typedef signed char __s8; |
typedef unsigned char __u8; |
typedef signed char int8_t; |
typedef unsigned char uint8_t; |
typedef signed short __s16; |
typedef unsigned short __u16; |
typedef signed short int16_t; |
typedef unsigned short uint16_t; |
typedef unsigned long __u32; |
typedef signed long __s32; |
typedef unsigned long uint32_t; |
typedef signed long int32_t; |
typedef unsigned long long __u64; |
typedef signed long long __s64; |
typedef unsigned long long uint64_t; |
typedef signed long long int64_t; |
typedef __u32 __address; |
typedef uint32_t uintptr_t; |
typedef __u32 ipl_t; |
typedef uint32_t ipl_t; |
typedef __u32 __native; |
typedef __s32 __snative; |
typedef uint32_t unative_t; |
typedef int32_t native_t; |
typedef struct pte pte_t; |
typedef __u32 pfn_t; |
typedef uint32_t pfn_t; |
#endif |
/kernel/trunk/arch/mips32/include/memstr.h |
---|
37,10 → 37,10 |
#define memcpy(dst, src, cnt) __builtin_memcpy((dst), (src), (cnt)) |
extern void memsetw(__address dst, size_t cnt, __u16 x); |
extern void memsetb(__address dst, size_t cnt, __u8 x); |
extern void memsetw(uintptr_t dst, size_t cnt, uint16_t x); |
extern void memsetb(uintptr_t dst, size_t cnt, uint8_t x); |
extern int memcmp(__address src, __address dst, int cnt); |
extern int memcmp(uintptr_t src, uintptr_t dst, int cnt); |
#endif |
/kernel/trunk/arch/mips32/include/arg.h |
---|
42,13 → 42,13 |
* To satisfy this, paddings must be sometimes inserted. |
*/ |
typedef __address va_list; |
typedef uintptr_t va_list; |
#define va_start(ap, lst) \ |
((ap) = (va_list)&(lst) + sizeof(lst)) |
#define va_arg(ap, type) \ |
(((type *)((ap) = (va_list)( (sizeof(type) <= 4) ? ((__address)((ap) + 2*4 - 1) & (~3)) : ((__address)((ap) + 2*8 -1) & (~7)) )))[-1]) |
(((type *)((ap) = (va_list)( (sizeof(type) <= 4) ? ((uintptr_t)((ap) + 2*4 - 1) & (~3)) : ((uintptr_t)((ap) + 2*8 -1) & (~7)) )))[-1]) |
#define va_copy(dst,src) ((dst)=(src)) |
/kernel/trunk/arch/mips32/include/asm.h |
---|
52,9 → 52,9 |
* The stack is assumed to be STACK_SIZE bytes long. |
* The stack must start on page boundary. |
*/ |
static inline __address get_stack_base(void) |
static inline uintptr_t get_stack_base(void) |
{ |
__address v; |
uintptr_t v; |
__asm__ volatile ("and %0, $29, %1\n" : "=r" (v) : "r" (~(STACK_SIZE-1))); |
62,9 → 62,9 |
} |
extern void cpu_halt(void); |
extern void asm_delay_loop(__u32 t); |
extern void userspace_asm(__address ustack, __address uspace_uarg, |
__address entry); |
extern void asm_delay_loop(uint32_t t); |
extern void userspace_asm(uintptr_t ustack, uintptr_t uspace_uarg, |
uintptr_t entry); |
#endif |
/kernel/trunk/arch/mips32/include/faddr.h |
---|
37,7 → 37,7 |
#include <arch/types.h> |
#define FADDR(fptr) ((__address) (fptr)) |
#define FADDR(fptr) ((uintptr_t) (fptr)) |
#endif |
/kernel/trunk/arch/mips32/include/cp0.h |
---|
63,14 → 63,14 |
#define cp0_mask_int(it) cp0_status_write(cp0_status_read() & ~(1<<(cp0_status_im_shift+(it)))) |
#define cp0_unmask_int(it) cp0_status_write(cp0_status_read() | (1<<(cp0_status_im_shift+(it)))) |
#define GEN_READ_CP0(nm,reg) static inline __u32 cp0_ ##nm##_read(void) \ |
#define GEN_READ_CP0(nm,reg) static inline uint32_t cp0_ ##nm##_read(void) \ |
{ \ |
__u32 retval; \ |
uint32_t retval; \ |
asm("mfc0 %0, $" #reg : "=r"(retval)); \ |
return retval; \ |
} |
#define GEN_WRITE_CP0(nm,reg) static inline void cp0_ ##nm##_write(__u32 val) \ |
#define GEN_WRITE_CP0(nm,reg) static inline void cp0_ ##nm##_write(uint32_t val) \ |
{ \ |
asm("mtc0 %0, $" #reg : : "r"(val) ); \ |
} |
/kernel/trunk/arch/mips32/include/mm/page.h |
---|
41,8 → 41,8 |
#define PAGE_SIZE FRAME_SIZE |
#ifndef __ASM__ |
# define KA2PA(x) (((__address) (x)) - 0x80000000) |
# define PA2KA(x) (((__address) (x)) + 0x80000000) |
# define KA2PA(x) (((uintptr_t) (x)) - 0x80000000) |
# define PA2KA(x) (((uintptr_t) (x)) + 0x80000000) |
#else |
# define KA2PA(x) ((x) - 0x80000000) |
# define PA2KA(x) ((x) + 0x80000000) |
100,7 → 100,7 |
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) |
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *)(ptl3), (index_t)(i), (x)) |
#define PTE_VALID_ARCH(pte) (*((__u32 *) (pte)) != 0) |
#define PTE_VALID_ARCH(pte) (*((uint32_t *) (pte)) != 0) |
#define PTE_PRESENT_ARCH(pte) ((pte)->p != 0) |
#define PTE_GET_FRAME_ARCH(pte) ((pte)->pfn<<12) |
#define PTE_WRITABLE_ARCH(pte) ((pte)->w != 0) |
/kernel/trunk/arch/mips32/include/mm/asid.h |
---|
39,7 → 39,7 |
#define ASID_MAX_ARCH 255 /* 2^8 - 1 */ |
typedef __u8 asid_t; |
typedef uint8_t asid_t; |
#endif |
/kernel/trunk/arch/mips32/include/mm/tlb.h |
---|
75,7 → 75,7 |
unsigned : 2; /* zero */ |
#endif |
} __attribute__ ((packed)); |
__u32 value; |
uint32_t value; |
}; |
/** Page Table Entry. */ |
103,7 → 103,7 |
unsigned vpn2 : 19; |
#endif |
} __attribute__ ((packed)); |
__u32 value; |
uint32_t value; |
}; |
union page_mask { |
118,7 → 118,7 |
unsigned : 7; |
#endif |
} __attribute__ ((packed)); |
__u32 value; |
uint32_t value; |
}; |
union index { |
133,7 → 133,7 |
unsigned p : 1; |
#endif |
} __attribute__ ((packed)); |
__u32 value; |
uint32_t value; |
}; |
/** Probe TLB for Matching Entry |
/kernel/trunk/arch/mips32/include/debugger.h |
---|
50,9 → 50,9 |
#define BKPOINT_FUNCCALL (1 << 3) /**< Call a predefined function */ |
typedef struct { |
__address address; /**< Breakpoint address */ |
__native instruction; /**< Original instruction */ |
__native nextinstruction; /**< Original instruction following break */ |
uintptr_t address; /**< Breakpoint address */ |
unative_t instruction; /**< Original instruction */ |
unative_t nextinstruction; /**< Original instruction following break */ |
int flags; /**< Flags regarding breakpoint */ |
count_t counter; |
void (*bkfunc)(void *b, istate_t *istate); |
/kernel/trunk/arch/mips32/include/context.h |
---|
55,19 → 55,19 |
* function calls. |
*/ |
struct context { |
__address sp; |
__address pc; |
uintptr_t sp; |
uintptr_t pc; |
__u32 s0; |
__u32 s1; |
__u32 s2; |
__u32 s3; |
__u32 s4; |
__u32 s5; |
__u32 s6; |
__u32 s7; |
__u32 s8; |
__u32 gp; |
uint32_t s0; |
uint32_t s1; |
uint32_t s2; |
uint32_t s3; |
uint32_t s4; |
uint32_t s5; |
uint32_t s6; |
uint32_t s7; |
uint32_t s8; |
uint32_t gp; |
ipl_t ipl; |
}; |
/kernel/trunk/arch/mips32/include/cpu.h |
---|
38,8 → 38,8 |
#include <arch/types.h> |
struct cpu_arch { |
__u32 imp_num; |
__u32 rev_num; |
uint32_t imp_num; |
uint32_t rev_num; |
}; |
#endif |
/kernel/trunk/arch/mips32/include/drivers/arc.h |
---|
56,9 → 56,9 |
}cm_resource_type; |
typedef struct { |
__u8 type; |
__u8 sharedisposition; |
__u16 flags; |
uint8_t type; |
uint8_t sharedisposition; |
uint16_t flags; |
union { |
struct { |
long long start; /* 64-bit phys address */ |
77,8 → 77,8 |
}__attribute__ ((packed)) cm_resource_descriptor; |
typedef struct { |
__u16 version; |
__u16 revision; |
uint16_t version; |
uint16_t revision; |
unsigned long count; |
cm_resource_descriptor descr[1]; |
}__attribute__ ((packed)) cm_resource_list; |
153,23 → 153,23 |
arc_component_class class; |
arc_component_type type; |
arc_component_flags flags; |
__u16 revision; |
__u16 version; |
__u32 key; |
__u32 affinitymask; |
__u32 configdatasize; |
__u32 identifier_len; |
uint16_t revision; |
uint16_t version; |
uint32_t key; |
uint32_t affinitymask; |
uint32_t configdatasize; |
uint32_t identifier_len; |
char *identifier; |
} __attribute__ ((packed)) arc_component; |
typedef struct { |
__u16 year; |
__u16 month; |
__u16 day; |
__u16 hour; |
__u16 minutes; |
__u16 seconds; |
__u16 mseconds; |
uint16_t year; |
uint16_t month; |
uint16_t day; |
uint16_t hour; |
uint16_t minutes; |
uint16_t seconds; |
uint16_t mseconds; |
} __attribute__ ((packed)) arc_timeinfo; |
/* This is the SGI block structure, WinNT has it different */ |
186,8 → 186,8 |
typedef struct { |
arc_memorytype_t type; |
__u32 basepage; /* *4096 = baseaddr */ |
__u32 basecount; |
uint32_t basepage; /* *4096 = baseaddr */ |
uint32_t basecount; |
}arc_memdescriptor_t; |
typedef struct { |
197,9 → 197,9 |
typedef struct { |
long (*load)(void); /* ... */ |
long (*invoke)(__u32 eaddr,__u32 saddr,__u32 argc,char **argv, |
long (*invoke)(uint32_t eaddr,uint32_t saddr,uint32_t argc,char **argv, |
char **envp); |
long (*execute)(char *path,__u32 argc,char **argv,char **envp); |
long (*execute)(char *path,uint32_t argc,char **argv,char **envp); |
void (*halt)(void); |
void (*powerdown)(void); |
void (*restart)(void); |
221,13 → 221,13 |
/* 20 */ |
long (*reserved2)(void); |
arc_timeinfo * (*gettime)(void); |
__u32 (*getrelativetime)(void); |
uint32_t (*getrelativetime)(void); |
long (*getdirectoryentry)(); |
long (*open)(void); /* ... */ |
long (*close)(__u32 fileid); |
long (*read)(__u32 fileid,void *buf,__u32 n,__u32 *cnt); |
long (*getreadstatus)(__u32 fileid); |
long (*write)(__u32 fileid, void *buf,__u32 n,__u32 *cnt); |
long (*close)(uint32_t fileid); |
long (*read)(uint32_t fileid,void *buf,uint32_t n,uint32_t *cnt); |
long (*getreadstatus)(uint32_t fileid); |
long (*write)(uint32_t fileid, void *buf,uint32_t n,uint32_t *cnt); |
long (*seek)(void); /* ... */ |
/* 30 */ |
long (*mount)(void); /* ... */ |
234,7 → 234,7 |
char * (*getenvironmentvariable)(char *name); |
char * (*setenvironmentvariable)(char *name, char *value); |
long (*getfileinformation)(void); /* ... */ |
long (*setfileinformation)(__u32 fileid,__u32 attflags,__u32 attmask); |
long (*setfileinformation)(uint32_t fileid,uint32_t attflags,uint32_t attmask); |
void (*flushallcaches)(void); |
long (*testunicodecharacter)(void); /* ... */ |
long (*getdisplaystatus)(void); /* ... */ |
241,19 → 241,19 |
} arc_func_vector_t; |
typedef struct { |
__u32 signature; |
__u32 length; |
__u16 version; |
__u16 revision; |
uint32_t signature; |
uint32_t length; |
uint16_t version; |
uint16_t revision; |
void *restartblock; |
void *debugblock; |
void *gevector; |
void *utlbmissvector; |
__u32 firmwarevectorlen; |
uint32_t firmwarevectorlen; |
arc_func_vector_t *firmwarevector; |
__u32 privvectorlen; |
uint32_t privvectorlen; |
void *privvector; |
__u32 adaptercount; |
uint32_t adaptercount; |
}__attribute__ ((packed)) arc_sbp; |
extern int arc_init(void); |
/kernel/trunk/arch/mips32/src/exception.c |
---|
95,7 → 95,7 |
static void reserved_instr_exception(int n, istate_t *istate) |
{ |
if (*((__u32 *)istate->epc) == 0x7c03e83b) { |
if (*((uint32_t *)istate->epc) == 0x7c03e83b) { |
ASSERT(THREAD); |
istate->epc += 4; |
istate->v1 = istate->k1; |
139,7 → 139,7 |
static void interrupt_exception(int n, istate_t *istate) |
{ |
__u32 cause; |
uint32_t cause; |
int i; |
/* decode interrupt number and process the interrupt */ |
/kernel/trunk/arch/mips32/src/mips32.c |
---|
71,7 → 71,7 |
/* Why the linker moves the variable 64K away in assembler |
* when not in .text section ???????? |
*/ |
__address supervisor_sp __attribute__ ((section (".text"))); |
uintptr_t supervisor_sp __attribute__ ((section (".text"))); |
/* Stack pointer saved when entering user mode */ |
/* TODO: How do we do it on SMP system???? */ |
bootinfo_t bootinfo __attribute__ ((section (".text"))); |
81,7 → 81,7 |
/* Setup usermode */ |
init.cnt = bootinfo.cnt; |
__u32 i; |
uint32_t i; |
for (i = 0; i < bootinfo.cnt; i++) { |
init.tasks[i].addr = bootinfo.tasks[i].addr; |
146,10 → 146,10 |
cp0_status_write(cp0_status_read() | (cp0_status_exl_exception_bit | |
cp0_status_um_bit | |
cp0_status_ie_enabled_bit)); |
cp0_epc_write((__address) kernel_uarg->uspace_entry); |
userspace_asm(((__address) kernel_uarg->uspace_stack+PAGE_SIZE), |
(__address) kernel_uarg->uspace_uarg, |
(__address) kernel_uarg->uspace_entry); |
cp0_epc_write((uintptr_t) kernel_uarg->uspace_entry); |
userspace_asm(((uintptr_t) kernel_uarg->uspace_stack+PAGE_SIZE), |
(uintptr_t) kernel_uarg->uspace_uarg, |
(uintptr_t) kernel_uarg->uspace_entry); |
while (1) |
; |
} |
162,7 → 162,7 |
/** Perform mips32 specific tasks needed before the new thread is scheduled. */ |
void before_thread_runs_arch(void) |
{ |
supervisor_sp = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
supervisor_sp = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
} |
void after_thread_ran_arch(void) |
174,7 → 174,7 |
* We have it currently in K1, it is |
* possible to have it separately in the future. |
*/ |
__native sys_tls_set(__native addr) |
unative_t sys_tls_set(unative_t addr) |
{ |
return 0; |
} |
/kernel/trunk/arch/mips32/src/ddi/ddi.c |
---|
50,7 → 50,7 |
* |
* @return 0 on success or an error code from errno.h. |
*/ |
int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size) |
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) |
{ |
return 0; |
} |
/kernel/trunk/arch/mips32/src/debugger.c |
---|
91,8 → 91,8 |
}; |
static struct { |
__u32 andmask; |
__u32 value; |
uint32_t andmask; |
uint32_t value; |
}jmpinstr[] = { |
{0xf3ff0000, 0x41000000}, /* BCzF */ |
{0xf3ff0000, 0x41020000}, /* BCzFL */ |
125,7 → 125,7 |
* @param instr Instruction code |
* @return true - it is jump instruction, false otherwise |
*/ |
static bool is_jump(__native instr) |
static bool is_jump(unative_t instr) |
{ |
int i; |
153,12 → 153,12 |
/* Check, that the breakpoints do not conflict */ |
for (i=0; i<BKPOINTS_MAX; i++) { |
if (breakpoints[i].address == (__address)argv->intval) { |
if (breakpoints[i].address == (uintptr_t)argv->intval) { |
printf("Duplicate breakpoint %d.\n", i); |
spinlock_unlock(&bkpoints_lock); |
return 0; |
} else if (breakpoints[i].address == (__address)argv->intval + sizeof(__native) || \ |
breakpoints[i].address == (__address)argv->intval - sizeof(__native)) { |
} else if (breakpoints[i].address == (uintptr_t)argv->intval + sizeof(unative_t) || \ |
breakpoints[i].address == (uintptr_t)argv->intval - sizeof(unative_t)) { |
printf("Adjacent breakpoints not supported, conflict with %d.\n", i); |
spinlock_unlock(&bkpoints_lock); |
return 0; |
177,10 → 177,10 |
interrupts_restore(ipl); |
return 0; |
} |
cur->address = (__address) argv->intval; |
cur->address = (uintptr_t) argv->intval; |
printf("Adding breakpoint on address: %p\n", argv->intval); |
cur->instruction = ((__native *)cur->address)[0]; |
cur->nextinstruction = ((__native *)cur->address)[1]; |
cur->instruction = ((unative_t *)cur->address)[0]; |
cur->nextinstruction = ((unative_t *)cur->address)[1]; |
if (argv == &add_argv) { |
cur->flags = 0; |
} else { /* We are add extended */ |
192,7 → 192,7 |
cur->counter = 0; |
/* Set breakpoint */ |
*((__native *)cur->address) = 0x0d; |
*((unative_t *)cur->address) = 0x0d; |
spinlock_unlock(&bkpoint_lock); |
interrupts_restore(ipl); |
228,8 → 228,8 |
interrupts_restore(ipl); |
return 0; |
} |
((__u32 *)cur->address)[0] = cur->instruction; |
((__u32 *)cur->address)[1] = cur->nextinstruction; |
((uint32_t *)cur->address)[0] = cur->instruction; |
((uint32_t *)cur->address)[1] = cur->nextinstruction; |
cur->address = NULL; |
298,7 → 298,7 |
void debugger_bpoint(istate_t *istate) |
{ |
bpinfo_t *cur = NULL; |
__address fireaddr = istate->epc; |
uintptr_t fireaddr = istate->epc; |
int i; |
/* test branch delay slot */ |
315,7 → 315,7 |
} |
/* Reinst only breakpoint */ |
if ((breakpoints[i].flags & BKPOINT_REINST) \ |
&& (fireaddr ==breakpoints[i].address+sizeof(__native))) { |
&& (fireaddr ==breakpoints[i].address+sizeof(unative_t))) { |
cur = &breakpoints[i]; |
break; |
} |
323,9 → 323,9 |
if (cur) { |
if (cur->flags & BKPOINT_REINST) { |
/* Set breakpoint on first instruction */ |
((__u32 *)cur->address)[0] = 0x0d; |
((uint32_t *)cur->address)[0] = 0x0d; |
/* Return back the second */ |
((__u32 *)cur->address)[1] = cur->nextinstruction; |
((uint32_t *)cur->address)[1] = cur->nextinstruction; |
cur->flags &= ~BKPOINT_REINST; |
spinlock_unlock(&bkpoint_lock); |
return; |
338,11 → 338,11 |
fireaddr, get_symtab_entry(istate->epc)); |
/* Return first instruction back */ |
((__u32 *)cur->address)[0] = cur->instruction; |
((uint32_t *)cur->address)[0] = cur->instruction; |
if (! (cur->flags & BKPOINT_ONESHOT)) { |
/* Set Breakpoint on next instruction */ |
((__u32 *)cur->address)[1] = 0x0d; |
((uint32_t *)cur->address)[1] = 0x0d; |
cur->flags |= BKPOINT_REINST; |
} |
cur->flags |= BKPOINT_INPROG; |
/kernel/trunk/arch/mips32/src/mm/tlb.c |
---|
51,10 → 51,10 |
static void tlb_invalid_fail(istate_t *istate); |
static void tlb_modified_fail(istate_t *istate); |
static pte_t *find_mapping_and_check(__address badvaddr, int access, istate_t *istate, int *pfrc); |
static pte_t *find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate, int *pfrc); |
static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn); |
static void prepare_entry_hi(entry_hi_t *hi, asid_t asid, __address addr); |
static void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, uintptr_t pfn); |
static void prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr); |
/** Initialize TLB |
* |
96,7 → 96,7 |
entry_lo_t lo; |
entry_hi_t hi; |
asid_t asid; |
__address badvaddr; |
uintptr_t badvaddr; |
pte_t *pte; |
int pfrc; |
166,7 → 166,7 |
void tlb_invalid(istate_t *istate) |
{ |
tlb_index_t index; |
__address badvaddr; |
uintptr_t badvaddr; |
entry_lo_t lo; |
entry_hi_t hi; |
pte_t *pte; |
250,7 → 250,7 |
void tlb_modified(istate_t *istate) |
{ |
tlb_index_t index; |
__address badvaddr; |
uintptr_t badvaddr; |
entry_lo_t lo; |
entry_hi_t hi; |
pte_t *pte; |
383,7 → 383,7 |
* |
* @return PTE on success, NULL otherwise. |
*/ |
pte_t *find_mapping_and_check(__address badvaddr, int access, istate_t *istate, int *pfrc) |
pte_t *find_mapping_and_check(uintptr_t badvaddr, int access, istate_t *istate, int *pfrc) |
{ |
entry_hi_t hi; |
pte_t *pte; |
445,7 → 445,7 |
} |
} |
void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, __address pfn) |
void prepare_entry_lo(entry_lo_t *lo, bool g, bool v, bool d, bool cacheable, uintptr_t pfn) |
{ |
lo->value = 0; |
lo->g = g; |
455,7 → 455,7 |
lo->pfn = pfn; |
} |
void prepare_entry_hi(entry_hi_t *hi, asid_t asid, __address addr) |
void prepare_entry_hi(entry_hi_t *hi, asid_t asid, uintptr_t addr) |
{ |
hi->value = ALIGN_DOWN(addr, PAGE_SIZE * 2); |
hi->asid = asid; |
567,7 → 567,7 |
* @param page First page whose TLB entry is to be invalidated. |
* @param cnt Number of entries to invalidate. |
*/ |
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt) |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
int i; |
ipl_t ipl; |
/kernel/trunk/arch/mips32/src/mm/page.c |
---|
45,7 → 45,7 |
* - on mips, all devices are already mapped into kernel space, |
* translate the physical address to uncached area |
*/ |
__address hw_map(__address physaddr, size_t size) |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |
return physaddr + 0xa0000000; |
} |
/kernel/trunk/arch/mips32/src/interrupt.c |
---|
132,7 → 132,7 |
} |
/* Reregister irq to be IPC-ready */ |
void irq_ipc_bind_arch(__native irq) |
void irq_ipc_bind_arch(unative_t irq) |
{ |
/* Do not allow to redefine timer */ |
/* Swint0, Swint1 are already handled */ |
/kernel/trunk/arch/mips32/src/drivers/arc.c |
---|
142,7 → 142,7 |
switch (configdata->descr[i].type) { |
case CmResourceTypePort: |
printf("Port: %p-size:%d ", |
(__address)configdata->descr[i].u.port.start, |
(uintptr_t)configdata->descr[i].u.port.start, |
configdata->descr[i].u.port.length); |
break; |
case CmResourceTypeInterrupt: |
152,7 → 152,7 |
break; |
case CmResourceTypeMemory: |
printf("Memory: %p-size:%d ", |
(__address)configdata->descr[i].u.port.start, |
(uintptr_t)configdata->descr[i].u.port.start, |
configdata->descr[i].u.port.length); |
break; |
default: |
236,7 → 236,7 |
/** Print charactor to console */ |
static void arc_putchar(char ch) |
{ |
__u32 cnt; |
uint32_t cnt; |
ipl_t ipl; |
/* TODO: Should be spinlock? */ |
293,7 → 293,7 |
static void arc_keyboard_poll(void) |
{ |
char ch; |
__u32 count; |
uint32_t count; |
long result; |
if (! kbd_polling_enabled) |
316,7 → 316,7 |
static char arc_read(chardev_t *dev) |
{ |
char ch; |
__u32 count; |
uint32_t count; |
long result; |
result = arc_entry->read(0, &ch, 1, &count); |
380,7 → 380,7 |
{ |
arc_memdescriptor_t *desc; |
int total = 0; |
__address base; |
uintptr_t base; |
size_t basesize; |
desc = arc_entry->getmemorydescriptor(NULL); |
/kernel/trunk/arch/ia32/include/interrupt.h |
---|
69,24 → 69,24 |
#define VECTOR_DEBUG_IPI (IVT_FREEBASE+2) |
struct istate { |
__u32 eax; |
__u32 ecx; |
__u32 edx; |
__u32 esi; |
__u32 edi; |
__u32 ebp; |
__u32 ebx; |
uint32_t eax; |
uint32_t ecx; |
uint32_t edx; |
uint32_t esi; |
uint32_t edi; |
uint32_t ebp; |
uint32_t ebx; |
__u32 gs; |
__u32 fs; |
__u32 es; |
__u32 ds; |
uint32_t gs; |
uint32_t fs; |
uint32_t es; |
uint32_t ds; |
__u32 error_word; |
__u32 eip; |
__u32 cs; |
__u32 eflags; |
__u32 stack[]; |
uint32_t error_word; |
uint32_t eip; |
uint32_t cs; |
uint32_t eflags; |
uint32_t stack[]; |
}; |
/** Return true if exception happened while in userspace */ |
95,18 → 95,18 |
return !(istate->eip & 0x80000000); |
} |
static inline void istate_set_retaddr(istate_t *istate, __address retaddr) |
static inline void istate_set_retaddr(istate_t *istate, uintptr_t retaddr) |
{ |
istate->eip = retaddr; |
} |
static inline __native istate_get_pc(istate_t *istate) |
static inline unative_t istate_get_pc(istate_t *istate) |
{ |
return istate->eip; |
} |
extern void (* disable_irqs_function)(__u16 irqmask); |
extern void (* enable_irqs_function)(__u16 irqmask); |
extern void (* disable_irqs_function)(uint16_t irqmask); |
extern void (* enable_irqs_function)(uint16_t irqmask); |
extern void (* eoi_function)(void); |
extern void PRINT_INFO_ERRCODE(istate_t *istate); |
118,8 → 118,8 |
extern void syscall(int n, istate_t *istate); |
extern void tlb_shootdown_ipi(int n, istate_t *istate); |
extern void trap_virtual_enable_irqs(__u16 irqmask); |
extern void trap_virtual_disable_irqs(__u16 irqmask); |
extern void trap_virtual_enable_irqs(uint16_t irqmask); |
extern void trap_virtual_disable_irqs(uint16_t irqmask); |
extern void trap_virtual_eoi(void); |
#endif |
/kernel/trunk/arch/ia32/include/fpu_context.h |
---|
45,7 → 45,7 |
struct fpu_context { |
__u8 fpu[512]; /* FXSAVE & FXRSTOR storage area */ |
uint8_t fpu[512]; /* FXSAVE & FXRSTOR storage area */ |
}; |
/kernel/trunk/arch/ia32/include/byteorder.h |
---|
36,8 → 36,8 |
#define __ia32_BYTEORDER_H__ |
/* IA-32 is little-endian */ |
#define __native_le2host(n) (n) |
#define __u64_le2host(n) (n) |
#define unative_t_le2host(n) (n) |
#define uint64_t_le2host(n) (n) |
#endif |
/kernel/trunk/arch/ia32/include/cpuid.h |
---|
38,10 → 38,10 |
#include <arch/types.h> |
struct cpu_info { |
__u32 cpuid_eax; |
__u32 cpuid_ebx; |
__u32 cpuid_ecx; |
__u32 cpuid_edx; |
uint32_t cpuid_eax; |
uint32_t cpuid_ebx; |
uint32_t cpuid_ecx; |
uint32_t cpuid_edx; |
} __attribute__ ((packed)); |
struct __cpuid_extended_feature_info { |
52,7 → 52,7 |
typedef union cpuid_extended_feature_info |
{ |
struct __cpuid_extended_feature_info bits; |
__u32 word; |
uint32_t word; |
}cpuid_extended_feature_info; |
68,13 → 68,13 |
typedef union cpuid_feature_info |
{ |
struct __cpuid_feature_info bits; |
__u32 word ; |
uint32_t word ; |
}cpuid_feature_info; |
static inline __u32 has_cpuid(void) |
static inline uint32_t has_cpuid(void) |
{ |
__u32 val, ret; |
uint32_t val, ret; |
__asm__ volatile ( |
"pushf\n" /* read flags */ |
97,7 → 97,7 |
return ret; |
} |
static inline void cpuid(__u32 cmd, struct cpu_info *info) |
static inline void cpuid(uint32_t cmd, struct cpu_info *info) |
{ |
__asm__ volatile ( |
"movl %4, %%eax\n" |
/kernel/trunk/arch/ia32/include/types.h |
---|
26,7 → 26,7 |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup ia32 |
/** @addtogroup ia32 |
* @{ |
*/ |
/** @file |
37,28 → 37,28 |
#define NULL 0 |
typedef signed char __s8; |
typedef signed short __s16; |
typedef signed long __s32; |
typedef signed long long __s64; |
typedef signed char int8_t; |
typedef signed short int16_t; |
typedef signed long int32_t; |
typedef signed long long int64_t; |
typedef unsigned char __u8; |
typedef unsigned short __u16; |
typedef unsigned long __u32; |
typedef unsigned long long __u64; |
typedef unsigned char uint8_t; |
typedef unsigned short uint16_t; |
typedef unsigned long uint32_t; |
typedef unsigned long long uint64_t; |
typedef __u32 __address; |
typedef __u32 pfn_t; |
typedef uint32_t uintptr_t; |
typedef uint32_t pfn_t; |
typedef __u32 ipl_t; |
typedef uint32_t ipl_t; |
typedef __u32 __native; |
typedef __s32 __snative; |
typedef uint32_t unative_t; |
typedef int32_t native_t; |
typedef struct page_specifier pte_t; |
#endif |
/** @} |
/** @} |
*/ |
/kernel/trunk/arch/ia32/include/bios/bios.h |
---|
39,7 → 39,7 |
#define BIOS_EBDA_PTR 0x40e |
extern __address ebda; |
extern uintptr_t ebda; |
extern void bios_init(void); |
/kernel/trunk/arch/ia32/include/memstr.h |
---|
49,7 → 49,7 |
*/ |
static inline void * memcpy(void * dst, const void * src, size_t cnt) |
{ |
__native d0, d1, d2; |
unative_t d0, d1, d2; |
__asm__ __volatile__( |
/* copy all full dwords */ |
65,7 → 65,7 |
/* exit from asm block */ |
"1:\n" |
: "=&c" (d0), "=&D" (d1), "=&S" (d2) |
: "0" ((__native) (cnt / 4)), "g" ((__native) cnt), "1" ((__native) dst), "2" ((__native) src) |
: "0" ((unative_t) (cnt / 4)), "g" ((unative_t) cnt), "1" ((unative_t) dst), "2" ((unative_t) src) |
: "memory"); |
return dst; |
85,7 → 85,7 |
*/ |
static inline int memcmp(const void * src, const void * dst, size_t cnt) |
{ |
__u32 d0, d1, d2; |
uint32_t d0, d1, d2; |
int ret; |
__asm__ ( |
95,7 → 95,7 |
"addl $1, %0\n\t" |
"1:\n" |
: "=a" (ret), "=%S" (d0), "=&D" (d1), "=&c" (d2) |
: "0" (0), "1" ((__native) src), "2" ((__native) dst), "3" ((__native) cnt) |
: "0" (0), "1" ((unative_t) src), "2" ((unative_t) dst), "3" ((unative_t) cnt) |
); |
return ret; |
110,9 → 110,9 |
* @param cnt Number of words |
* @param x Value to fill |
*/ |
static inline void memsetw(__address dst, size_t cnt, __u16 x) |
static inline void memsetw(uintptr_t dst, size_t cnt, uint16_t x) |
{ |
__u32 d0, d1; |
uint32_t d0, d1; |
__asm__ __volatile__ ( |
"rep stosw\n\t" |
132,9 → 132,9 |
* @param cnt Number of bytes |
* @param x Value to fill |
*/ |
static inline void memsetb(__address dst, size_t cnt, __u8 x) |
static inline void memsetb(uintptr_t dst, size_t cnt, uint8_t x) |
{ |
__u32 d0, d1; |
uint32_t d0, d1; |
__asm__ __volatile__ ( |
"rep stosb\n\t" |
/kernel/trunk/arch/ia32/include/smp/apic.h |
---|
105,13 → 105,13 |
#define MODEL_CLUSTER 0x0 |
/** Interrupt Command Register. */ |
#define ICRlo (0x300/sizeof(__u32)) |
#define ICRhi (0x310/sizeof(__u32)) |
#define ICRlo (0x300/sizeof(uint32_t)) |
#define ICRhi (0x310/sizeof(uint32_t)) |
struct icr { |
union { |
__u32 lo; |
uint32_t lo; |
struct { |
__u8 vector; /**< Interrupt Vector. */ |
uint8_t vector; /**< Interrupt Vector. */ |
unsigned delmod : 3; /**< Delivery Mode. */ |
unsigned destmod : 1; /**< Destination Mode. */ |
unsigned delivs : 1; /**< Delivery status (RO). */ |
124,10 → 124,10 |
} __attribute__ ((packed)); |
}; |
union { |
__u32 hi; |
uint32_t hi; |
struct { |
unsigned : 24; /**< Reserved. */ |
__u8 dest; /**< Destination field. */ |
uint8_t dest; /**< Destination field. */ |
} __attribute__ ((packed)); |
}; |
} __attribute__ ((packed)); |
134,13 → 134,13 |
typedef struct icr icr_t; |
/* End Of Interrupt. */ |
#define EOI (0x0b0/sizeof(__u32)) |
#define EOI (0x0b0/sizeof(uint32_t)) |
/** Error Status Register. */ |
#define ESR (0x280/sizeof(__u32)) |
#define ESR (0x280/sizeof(uint32_t)) |
union esr { |
__u32 value; |
__u8 err_bitmap; |
uint32_t value; |
uint8_t err_bitmap; |
struct { |
unsigned send_checksum_error : 1; |
unsigned receive_checksum_error : 1; |
156,9 → 156,9 |
typedef union esr esr_t; |
/* Task Priority Register */ |
#define TPR (0x080/sizeof(__u32)) |
#define TPR (0x080/sizeof(uint32_t)) |
union tpr { |
__u32 value; |
uint32_t value; |
struct { |
unsigned pri_sc : 4; /**< Task Priority Sub-Class. */ |
unsigned pri : 4; /**< Task Priority. */ |
167,11 → 167,11 |
typedef union tpr tpr_t; |
/** Spurious-Interrupt Vector Register. */ |
#define SVR (0x0f0/sizeof(__u32)) |
#define SVR (0x0f0/sizeof(uint32_t)) |
union svr { |
__u32 value; |
uint32_t value; |
struct { |
__u8 vector; /**< Spurious Vector. */ |
uint8_t vector; /**< Spurious Vector. */ |
unsigned lapic_enabled : 1; /**< APIC Software Enable/Disable. */ |
unsigned focus_checking : 1; /**< Focus Processor Checking. */ |
unsigned : 22; /**< Reserved. */ |
180,9 → 180,9 |
typedef union svr svr_t; |
/** Time Divide Configuration Register. */ |
#define TDCR (0x3e0/sizeof(__u32)) |
#define TDCR (0x3e0/sizeof(uint32_t)) |
union tdcr { |
__u32 value; |
uint32_t value; |
struct { |
unsigned div_value : 4; /**< Divide Value, bit 2 is always 0. */ |
unsigned : 28; /**< Reserved. */ |
191,17 → 191,17 |
typedef union tdcr tdcr_t; |
/* Initial Count Register for Timer */ |
#define ICRT (0x380/sizeof(__u32)) |
#define ICRT (0x380/sizeof(uint32_t)) |
/* Current Count Register for Timer */ |
#define CCRT (0x390/sizeof(__u32)) |
#define CCRT (0x390/sizeof(uint32_t)) |
/** LVT Timer register. */ |
#define LVT_Tm (0x320/sizeof(__u32)) |
#define LVT_Tm (0x320/sizeof(uint32_t)) |
union lvt_tm { |
__u32 value; |
uint32_t value; |
struct { |
__u8 vector; /**< Local Timer Interrupt vector. */ |
uint8_t vector; /**< Local Timer Interrupt vector. */ |
unsigned : 4; /**< Reserved. */ |
unsigned delivs : 1; /**< Delivery status (RO). */ |
unsigned : 3; /**< Reserved. */ |
213,12 → 213,12 |
typedef union lvt_tm lvt_tm_t; |
/** LVT LINT registers. */ |
#define LVT_LINT0 (0x350/sizeof(__u32)) |
#define LVT_LINT1 (0x360/sizeof(__u32)) |
#define LVT_LINT0 (0x350/sizeof(uint32_t)) |
#define LVT_LINT1 (0x360/sizeof(uint32_t)) |
union lvt_lint { |
__u32 value; |
uint32_t value; |
struct { |
__u8 vector; /**< LINT Interrupt vector. */ |
uint8_t vector; /**< LINT Interrupt vector. */ |
unsigned delmod : 3; /**< Delivery Mode. */ |
unsigned : 1; /**< Reserved. */ |
unsigned delivs : 1; /**< Delivery status (RO). */ |
232,11 → 232,11 |
typedef union lvt_lint lvt_lint_t; |
/** LVT Error register. */ |
#define LVT_Err (0x370/sizeof(__u32)) |
#define LVT_Err (0x370/sizeof(uint32_t)) |
union lvt_error { |
__u32 value; |
uint32_t value; |
struct { |
__u8 vector; /**< Local Timer Interrupt vector. */ |
uint8_t vector; /**< Local Timer Interrupt vector. */ |
unsigned : 4; /**< Reserved. */ |
unsigned delivs : 1; /**< Delivery status (RO). */ |
unsigned : 3; /**< Reserved. */ |
247,18 → 247,18 |
typedef union lvt_error lvt_error_t; |
/** Local APIC ID Register. */ |
#define L_APIC_ID (0x020/sizeof(__u32)) |
#define L_APIC_ID (0x020/sizeof(uint32_t)) |
union l_apic_id { |
__u32 value; |
uint32_t value; |
struct { |
unsigned : 24; /**< Reserved. */ |
__u8 apic_id; /**< Local APIC ID. */ |
uint8_t apic_id; /**< Local APIC ID. */ |
} __attribute__ ((packed)); |
}; |
typedef union l_apic_id l_apic_id_t; |
/** Local APIC Version Register */ |
#define LAVR (0x030/sizeof(__u32)) |
#define LAVR (0x030/sizeof(uint32_t)) |
#define LAVR_Mask 0xff |
#define is_local_apic(x) (((x)&LAVR_Mask&0xf0)==0x1) |
#define is_82489DX_apic(x) ((((x)&LAVR_Mask&0xf0)==0x0)) |
265,20 → 265,20 |
#define is_local_xapic(x) (((x)&LAVR_Mask)==0x14) |
/** Logical Destination Register. */ |
#define LDR (0x0d0/sizeof(__u32)) |
#define LDR (0x0d0/sizeof(uint32_t)) |
union ldr { |
__u32 value; |
uint32_t value; |
struct { |
unsigned : 24; /**< Reserved. */ |
__u8 id; /**< Logical APIC ID. */ |
uint8_t id; /**< Logical APIC ID. */ |
} __attribute__ ((packed)); |
}; |
typedef union ldr ldr_t; |
/** Destination Format Register. */ |
#define DFR (0x0e0/sizeof(__u32)) |
#define DFR (0x0e0/sizeof(uint32_t)) |
union dfr { |
__u32 value; |
uint32_t value; |
struct { |
unsigned : 28; /**< Reserved, all ones. */ |
unsigned model : 4; /**< Model. */ |
287,8 → 287,8 |
typedef union dfr dfr_t; |
/* IO APIC */ |
#define IOREGSEL (0x00/sizeof(__u32)) |
#define IOWIN (0x10/sizeof(__u32)) |
#define IOREGSEL (0x00/sizeof(uint32_t)) |
#define IOWIN (0x10/sizeof(uint32_t)) |
#define IOAPICID 0x00 |
#define IOAPICVER 0x01 |
297,9 → 297,9 |
/** I/O Register Select Register. */ |
union io_regsel { |
__u32 value; |
uint32_t value; |
struct { |
__u8 reg_addr; /**< APIC Register Address. */ |
uint8_t reg_addr; /**< APIC Register Address. */ |
unsigned : 24; /**< Reserved. */ |
} __attribute__ ((packed)); |
}; |
308,9 → 308,9 |
/** I/O Redirection Register. */ |
struct io_redirection_reg { |
union { |
__u32 lo; |
uint32_t lo; |
struct { |
__u8 intvec; /**< Interrupt Vector. */ |
uint8_t intvec; /**< Interrupt Vector. */ |
unsigned delmod : 3; /**< Delivery Mode. */ |
unsigned destmod : 1; /**< Destination mode. */ |
unsigned delivs : 1; /**< Delivery status (RO). */ |
322,10 → 322,10 |
} __attribute__ ((packed)); |
}; |
union { |
__u32 hi; |
uint32_t hi; |
struct { |
unsigned : 24; /**< Reserved. */ |
__u8 dest : 8; /**< Destination Field. */ |
uint8_t dest : 8; /**< Destination Field. */ |
} __attribute__ ((packed)); |
}; |
335,7 → 335,7 |
/** IO APIC Identification Register. */ |
union io_apic_id { |
__u32 value; |
uint32_t value; |
struct { |
unsigned : 24; /**< Reserved. */ |
unsigned apic_id : 4; /**< IO APIC ID. */ |
344,25 → 344,25 |
}; |
typedef union io_apic_id io_apic_id_t; |
extern volatile __u32 *l_apic; |
extern volatile __u32 *io_apic; |
extern volatile uint32_t *l_apic; |
extern volatile uint32_t *io_apic; |
extern __u32 apic_id_mask; |
extern uint32_t apic_id_mask; |
extern void apic_init(void); |
extern void l_apic_init(void); |
extern void l_apic_eoi(void); |
extern int l_apic_broadcast_custom_ipi(__u8 vector); |
extern int l_apic_send_init_ipi(__u8 apicid); |
extern int l_apic_broadcast_custom_ipi(uint8_t vector); |
extern int l_apic_send_init_ipi(uint8_t apicid); |
extern void l_apic_debug(void); |
extern __u8 l_apic_id(void); |
extern uint8_t l_apic_id(void); |
extern __u32 io_apic_read(__u8 address); |
extern void io_apic_write(__u8 address , __u32 x); |
extern void io_apic_change_ioredtbl(int pin, int dest, __u8 v, int flags); |
extern void io_apic_disable_irqs(__u16 irqmask); |
extern void io_apic_enable_irqs(__u16 irqmask); |
extern uint32_t io_apic_read(uint8_t address); |
extern void io_apic_write(uint8_t address , uint32_t x); |
extern void io_apic_change_ioredtbl(int pin, int dest, uint8_t v, int flags); |
extern void io_apic_disable_irqs(uint16_t irqmask); |
extern void io_apic_enable_irqs(uint16_t irqmask); |
#endif |
/kernel/trunk/arch/ia32/include/smp/mps.h |
---|
45,79 → 45,79 |
#define CT_EXT_ENTRY_LEN 1 |
struct mps_fs { |
__u32 signature; |
__u32 configuration_table; |
__u8 length; |
__u8 revision; |
__u8 checksum; |
__u8 config_type; |
__u8 mpfib2; |
__u8 mpfib3; |
__u8 mpfib4; |
__u8 mpfib5; |
uint32_t signature; |
uint32_t configuration_table; |
uint8_t length; |
uint8_t revision; |
uint8_t checksum; |
uint8_t config_type; |
uint8_t mpfib2; |
uint8_t mpfib3; |
uint8_t mpfib4; |
uint8_t mpfib5; |
} __attribute__ ((packed)); |
struct mps_ct { |
__u32 signature; |
__u16 base_table_length; |
__u8 revision; |
__u8 checksum; |
__u8 oem_id[8]; |
__u8 product_id[12]; |
__u32 oem_table; |
__u16 oem_table_size; |
__u16 entry_count; |
__u32 l_apic; |
__u16 ext_table_length; |
__u8 ext_table_checksum; |
__u8 xxx; |
__u8 base_table[0]; |
uint32_t signature; |
uint16_t base_table_length; |
uint8_t revision; |
uint8_t checksum; |
uint8_t oem_id[8]; |
uint8_t product_id[12]; |
uint32_t oem_table; |
uint16_t oem_table_size; |
uint16_t entry_count; |
uint32_t l_apic; |
uint16_t ext_table_length; |
uint8_t ext_table_checksum; |
uint8_t xxx; |
uint8_t base_table[0]; |
} __attribute__ ((packed)); |
struct __processor_entry { |
__u8 type; |
__u8 l_apic_id; |
__u8 l_apic_version; |
__u8 cpu_flags; |
__u8 cpu_signature[4]; |
__u32 feature_flags; |
__u32 xxx[2]; |
uint8_t type; |
uint8_t l_apic_id; |
uint8_t l_apic_version; |
uint8_t cpu_flags; |
uint8_t cpu_signature[4]; |
uint32_t feature_flags; |
uint32_t xxx[2]; |
} __attribute__ ((packed)); |
struct __bus_entry { |
__u8 type; |
__u8 bus_id; |
__u8 bus_type[6]; |
uint8_t type; |
uint8_t bus_id; |
uint8_t bus_type[6]; |
} __attribute__ ((packed)); |
struct __io_apic_entry { |
__u8 type; |
__u8 io_apic_id; |
__u8 io_apic_version; |
__u8 io_apic_flags; |
__u32 io_apic; |
uint8_t type; |
uint8_t io_apic_id; |
uint8_t io_apic_version; |
uint8_t io_apic_flags; |
uint32_t io_apic; |
} __attribute__ ((packed)); |
struct __io_intr_entry { |
__u8 type; |
__u8 intr_type; |
__u8 poel; |
__u8 xxx; |
__u8 src_bus_id; |
__u8 src_bus_irq; |
__u8 dst_io_apic_id; |
__u8 dst_io_apic_pin; |
uint8_t type; |
uint8_t intr_type; |
uint8_t poel; |
uint8_t xxx; |
uint8_t src_bus_id; |
uint8_t src_bus_irq; |
uint8_t dst_io_apic_id; |
uint8_t dst_io_apic_pin; |
} __attribute__ ((packed)); |
struct __l_intr_entry { |
__u8 type; |
__u8 intr_type; |
__u8 poel; |
__u8 xxx; |
__u8 src_bus_id; |
__u8 src_bus_irq; |
__u8 dst_l_apic_id; |
__u8 dst_l_apic_pin; |
uint8_t type; |
uint8_t intr_type; |
uint8_t poel; |
uint8_t xxx; |
uint8_t src_bus_id; |
uint8_t src_bus_irq; |
uint8_t dst_l_apic_id; |
uint8_t dst_l_apic_pin; |
} __attribute__ ((packed)); |
/kernel/trunk/arch/ia32/include/smp/smp.h |
---|
43,7 → 43,7 |
count_t (* cpu_count)(void); /**< Return number of detected processors. */ |
bool (* cpu_enabled)(index_t i); /**< Check whether the processor of index i is enabled. */ |
bool (*cpu_bootstrap)(index_t i); /**< Check whether the processor of index i is BSP. */ |
__u8 (*cpu_apic_id)(index_t i); /**< Return APIC ID of the processor of index i. */ |
uint8_t (*cpu_apic_id)(index_t i); /**< Return APIC ID of the processor of index i. */ |
int (*irq_to_pin)(int irq); /**< Return mapping between irq and APIC pin. */ |
}; |
/kernel/trunk/arch/ia32/include/atomic.h |
---|
83,8 → 83,8 |
#define atomic_preinc(val) (atomic_postinc(val)+1) |
#define atomic_predec(val) (atomic_postdec(val)-1) |
static inline __u32 test_and_set(atomic_t *val) { |
__u32 v; |
static inline uint32_t test_and_set(atomic_t *val) { |
uint32_t v; |
__asm__ volatile ( |
"movl $1, %0\n" |
98,7 → 98,7 |
/** ia32 specific fast spinlock */ |
static inline void atomic_lock_arch(atomic_t *val) |
{ |
__u32 tmp; |
uint32_t tmp; |
preemption_disable(); |
__asm__ volatile ( |
/kernel/trunk/arch/ia32/include/pm.h |
---|
85,8 → 85,8 |
#include <arch/context.h> |
struct ptr_16_32 { |
__u16 limit; |
__u32 base; |
uint16_t limit; |
uint32_t base; |
} __attribute__ ((packed)); |
typedef struct ptr_16_32 ptr_16_32_t; |
114,45 → 114,45 |
typedef struct idescriptor idescriptor_t; |
struct tss { |
__u16 link; |
uint16_t link; |
unsigned : 16; |
__u32 esp0; |
__u16 ss0; |
uint32_t esp0; |
uint16_t ss0; |
unsigned : 16; |
__u32 esp1; |
__u16 ss1; |
uint32_t esp1; |
uint16_t ss1; |
unsigned : 16; |
__u32 esp2; |
__u16 ss2; |
uint32_t esp2; |
uint16_t ss2; |
unsigned : 16; |
__u32 cr3; |
__u32 eip; |
__u32 eflags; |
__u32 eax; |
__u32 ecx; |
__u32 edx; |
__u32 ebx; |
__u32 esp; |
__u32 ebp; |
__u32 esi; |
__u32 edi; |
__u16 es; |
uint32_t cr3; |
uint32_t eip; |
uint32_t eflags; |
uint32_t eax; |
uint32_t ecx; |
uint32_t edx; |
uint32_t ebx; |
uint32_t esp; |
uint32_t ebp; |
uint32_t esi; |
uint32_t edi; |
uint16_t es; |
unsigned : 16; |
__u16 cs; |
uint16_t cs; |
unsigned : 16; |
__u16 ss; |
uint16_t ss; |
unsigned : 16; |
__u16 ds; |
uint16_t ds; |
unsigned : 16; |
__u16 fs; |
uint16_t fs; |
unsigned : 16; |
__u16 gs; |
uint16_t gs; |
unsigned : 16; |
__u16 ldtr; |
uint16_t ldtr; |
unsigned : 16; |
unsigned : 16; |
__u16 iomap_base; |
__u8 iomap[TSS_IOMAP_SIZE]; |
uint16_t iomap_base; |
uint8_t iomap[TSS_IOMAP_SIZE]; |
} __attribute__ ((packed)); |
typedef struct tss tss_t; |
165,14 → 165,14 |
extern void pm_init(void); |
extern void gdt_setbase(descriptor_t *d, __address base); |
extern void gdt_setlimit(descriptor_t *d, __u32 limit); |
extern void gdt_setbase(descriptor_t *d, uintptr_t base); |
extern void gdt_setlimit(descriptor_t *d, uint32_t limit); |
extern void idt_init(void); |
extern void idt_setoffset(idescriptor_t *d, __address offset); |
extern void idt_setoffset(idescriptor_t *d, uintptr_t offset); |
extern void tss_initialize(tss_t *t); |
extern void set_tls_desc(__address tls); |
extern void set_tls_desc(uintptr_t tls); |
#endif /* __ASM__ */ |
/kernel/trunk/arch/ia32/include/boot/memmap.h |
---|
58,16 → 58,16 |
#include <arch/types.h> |
struct e820memmap_ { |
__u64 base_address; |
__u64 size; |
__u32 type; |
uint64_t base_address; |
uint64_t size; |
uint32_t type; |
} __attribute__ ((packed)); |
extern struct e820memmap_ e820table[MEMMAP_E820_MAX_RECORDS]; |
extern __u8 e820counter; |
extern uint8_t e820counter; |
extern __u32 e801memorysize; /**< Size of available memory in KB. */ |
extern uint32_t e801memorysize; /**< Size of available memory in KB. */ |
#endif |
/kernel/trunk/arch/ia32/include/proc/thread.h |
---|
38,7 → 38,7 |
#include <arch/types.h> |
typedef struct { |
__native tls; |
unative_t tls; |
} thread_arch_t; |
#endif |
/kernel/trunk/arch/ia32/include/asm.h |
---|
40,7 → 40,7 |
#include <arch/types.h> |
#include <config.h> |
extern __u32 interrupt_handler_size; |
extern uint32_t interrupt_handler_size; |
extern void paging_on(void); |
49,8 → 49,8 |
extern void enable_l_apic_in_msr(void); |
extern void asm_delay_loop(__u32 t); |
extern void asm_fake_loop(__u32 t); |
extern void asm_delay_loop(uint32_t t); |
extern void asm_fake_loop(uint32_t t); |
/** Halt CPU |
60,14 → 60,14 |
static inline void cpu_halt(void) { __asm__("hlt\n"); }; |
static inline void cpu_sleep(void) { __asm__("hlt\n"); }; |
#define GEN_READ_REG(reg) static inline __native read_ ##reg (void) \ |
#define GEN_READ_REG(reg) static inline unative_t read_ ##reg (void) \ |
{ \ |
__native res; \ |
unative_t res; \ |
__asm__ volatile ("movl %%" #reg ", %0" : "=r" (res) ); \ |
return res; \ |
} |
#define GEN_WRITE_REG(reg) static inline void write_ ##reg (__native regn) \ |
#define GEN_WRITE_REG(reg) static inline void write_ ##reg (unative_t regn) \ |
{ \ |
__asm__ volatile ("movl %0, %%" #reg : : "r" (regn)); \ |
} |
98,7 → 98,7 |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outb(__u16 port, __u8 val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); } |
static inline void outb(uint16_t port, uint8_t val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); } |
/** Word to port |
* |
107,7 → 107,7 |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outw(__u16 port, __u16 val) { __asm__ volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) ); } |
static inline void outw(uint16_t port, uint16_t val) { __asm__ volatile ("outw %w0, %w1\n" : : "a" (val), "d" (port) ); } |
/** Double word to port |
* |
116,7 → 116,7 |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outl(__u16 port, __u32 val) { __asm__ volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) ); } |
static inline void outl(uint16_t port, uint32_t val) { __asm__ volatile ("outl %l0, %w1\n" : : "a" (val), "d" (port) ); } |
/** Byte from port |
* |
125,7 → 125,7 |
* @param port Port to read from |
* @return Value read |
*/ |
static inline __u8 inb(__u16 port) { __u8 val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline uint8_t inb(uint16_t port) { uint8_t val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; } |
/** Word from port |
* |
134,7 → 134,7 |
* @param port Port to read from |
* @return Value read |
*/ |
static inline __u16 inw(__u16 port) { __u16 val; __asm__ volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline uint16_t inw(uint16_t port) { uint16_t val; __asm__ volatile ("inw %w1, %w0 \n" : "=a" (val) : "d" (port) ); return val; } |
/** Double word from port |
* |
143,7 → 143,7 |
* @param port Port to read from |
* @return Value read |
*/ |
static inline __u32 inl(__u16 port) { __u32 val; __asm__ volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline uint32_t inl(uint16_t port) { uint32_t val; __asm__ volatile ("inl %w1, %l0 \n" : "=a" (val) : "d" (port) ); return val; } |
/** Enable interrupts. |
* |
219,9 → 219,9 |
* The stack is assumed to be STACK_SIZE bytes long. |
* The stack must start on page boundary. |
*/ |
static inline __address get_stack_base(void) |
static inline uintptr_t get_stack_base(void) |
{ |
__address v; |
uintptr_t v; |
__asm__ volatile ("andl %%esp, %0\n" : "=r" (v) : "0" (~(STACK_SIZE-1))); |
228,9 → 228,9 |
return v; |
} |
static inline __u64 rdtsc(void) |
static inline uint64_t rdtsc(void) |
{ |
__u64 v; |
uint64_t v; |
__asm__ volatile("rdtsc\n" : "=A" (v)); |
238,9 → 238,9 |
} |
/** Return current IP address */ |
static inline __address * get_ip() |
static inline uintptr_t * get_ip() |
{ |
__address *ip; |
uintptr_t *ip; |
__asm__ volatile ( |
"mov %%eip, %0" |
253,9 → 253,9 |
* |
* @param addr Address on a page whose TLB entry is to be invalidated. |
*/ |
static inline void invlpg(__address addr) |
static inline void invlpg(uintptr_t addr) |
{ |
__asm__ volatile ("invlpg %0\n" :: "m" (*(__native *)addr)); |
__asm__ volatile ("invlpg %0\n" :: "m" (*(unative_t *)addr)); |
} |
/** Load GDTR register from memory. |
289,7 → 289,7 |
* |
* @param sel Selector specifying descriptor of TSS segment. |
*/ |
static inline void tr_load(__u16 sel) |
static inline void tr_load(uint16_t sel) |
{ |
__asm__ volatile ("ltr %0" : : "r" (sel)); |
} |
/kernel/trunk/arch/ia32/include/faddr.h |
---|
37,7 → 37,7 |
#include <arch/types.h> |
#define FADDR(fptr) ((__address) (fptr)) |
#define FADDR(fptr) ((uintptr_t) (fptr)) |
#endif |
/kernel/trunk/arch/ia32/include/mm/frame.h |
---|
44,7 → 44,7 |
#include <arch/types.h> |
extern __address last_frame; |
extern uintptr_t last_frame; |
extern void frame_arch_init(void); |
/kernel/trunk/arch/ia32/include/mm/page.h |
---|
43,8 → 43,8 |
#ifdef KERNEL |
#ifndef __ASM__ |
# define KA2PA(x) (((__address) (x)) - 0x80000000) |
# define PA2KA(x) (((__address) (x)) + 0x80000000) |
# define KA2PA(x) (((uintptr_t) (x)) - 0x80000000) |
# define PA2KA(x) (((uintptr_t) (x)) + 0x80000000) |
#else |
# define KA2PA(x) ((x) - 0x80000000) |
# define PA2KA(x) ((x) + 0x80000000) |
67,9 → 67,9 |
#define GET_PTL1_ADDRESS_ARCH(ptl0, i) ((pte_t *)((((pte_t *)(ptl0))[(i)].frame_address)<<12)) |
#define GET_PTL2_ADDRESS_ARCH(ptl1, i) (ptl1) |
#define GET_PTL3_ADDRESS_ARCH(ptl2, i) (ptl2) |
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((__address)((((pte_t *)(ptl3))[(i)].frame_address)<<12)) |
#define GET_FRAME_ADDRESS_ARCH(ptl3, i) ((uintptr_t)((((pte_t *)(ptl3))[(i)].frame_address)<<12)) |
#define SET_PTL0_ADDRESS_ARCH(ptl0) (write_cr3((__address) (ptl0))) |
#define SET_PTL0_ADDRESS_ARCH(ptl0) (write_cr3((uintptr_t) (ptl0))) |
#define SET_PTL1_ADDRESS_ARCH(ptl0, i, a) (((pte_t *)(ptl0))[(i)].frame_address = (a)>>12) |
#define SET_PTL2_ADDRESS_ARCH(ptl1, i, a) |
#define SET_PTL3_ADDRESS_ARCH(ptl2, i, a) |
85,7 → 85,7 |
#define SET_PTL3_FLAGS_ARCH(ptl2, i, x) |
#define SET_FRAME_FLAGS_ARCH(ptl3, i, x) set_pt_flags((pte_t *)(ptl3), (index_t)(i), (x)) |
#define PTE_VALID_ARCH(p) (*((__u32 *) (p)) != 0) |
#define PTE_VALID_ARCH(p) (*((uint32_t *) (p)) != 0) |
#define PTE_PRESENT_ARCH(p) ((p)->present != 0) |
#define PTE_GET_FRAME_ARCH(p) ((p)->frame_address<<FRAME_WIDTH) |
#define PTE_WRITABLE_ARCH(p) ((p)->writeable != 0) |
/kernel/trunk/arch/ia32/include/context.h |
---|
52,12 → 52,12 |
* function calls. |
*/ |
struct context { |
__address sp; |
__address pc; |
__u32 ebx; |
__u32 esi; |
__u32 edi; |
__u32 ebp; |
uintptr_t sp; |
uintptr_t pc; |
uint32_t ebx; |
uint32_t esi; |
uint32_t edi; |
uint32_t ebp; |
ipl_t ipl; |
} __attribute__ ((packed)); |
/kernel/trunk/arch/ia32/include/drivers/i8259.h |
---|
47,8 → 47,8 |
#define PIC_ICW1 (1<<4) |
extern void i8259_init(void); |
extern void pic_enable_irqs(__u16 irqmask); |
extern void pic_disable_irqs(__u16 irqmask); |
extern void pic_enable_irqs(uint16_t irqmask); |
extern void pic_disable_irqs(uint16_t irqmask); |
extern void pic_eoi(void); |
#endif |
/kernel/trunk/arch/ia32/include/drivers/i8042.h |
---|
45,22 → 45,22 |
#define i8042_DATA 0x60 |
#define i8042_STATUS 0x64 |
static inline void i8042_data_write(__u8 data) |
static inline void i8042_data_write(uint8_t data) |
{ |
outb(i8042_DATA, data); |
} |
static inline __u8 i8042_data_read(void) |
static inline uint8_t i8042_data_read(void) |
{ |
return inb(i8042_DATA); |
} |
static inline __u8 i8042_status_read(void) |
static inline uint8_t i8042_status_read(void) |
{ |
return inb(i8042_STATUS); |
} |
static inline void i8042_command_write(__u8 command) |
static inline void i8042_command_write(uint8_t command) |
{ |
outb(i8042_STATUS, command); |
} |
/kernel/trunk/arch/ia32/src/ia32.c |
---|
132,7 → 132,7 |
* TLS pointer is set in GS register. That means, the GS contains |
* selector, and the descriptor->base is the correct address. |
*/ |
__native sys_tls_set(__native addr) |
unative_t sys_tls_set(unative_t addr) |
{ |
THREAD->arch.tls = addr; |
set_tls_desc(addr); |
/kernel/trunk/arch/ia32/src/fpu_context.c |
---|
108,7 → 108,7 |
void fpu_init() |
{ |
__u32 help0=0,help1=0; |
uint32_t help0=0,help1=0; |
__asm__ volatile ( |
"fninit;\n" |
"stmxcsr %0\n" |
/kernel/trunk/arch/ia32/src/cpu/cpu.c |
---|
98,10 → 98,10 |
cpuid_feature_info fi; |
cpuid_extended_feature_info efi; |
cpu_info_t info; |
__u32 help = 0; |
uint32_t help = 0; |
CPU->arch.tss = tss_p; |
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((__u8 *) CPU->arch.tss); |
CPU->arch.tss->iomap_base = &CPU->arch.tss->iomap[0] - ((uint8_t *) CPU->arch.tss); |
CPU->fpu_owner = NULL; |
/kernel/trunk/arch/ia32/src/bios/bios.c |
---|
35,12 → 35,12 |
#include <arch/bios/bios.h> |
#include <arch/types.h> |
__address ebda = 0; |
uintptr_t ebda = 0; |
void bios_init(void) |
{ |
/* Copy the EBDA address out from BIOS Data Area */ |
ebda = *((__u16 *) BIOS_EBDA_PTR) * 0x10; |
ebda = *((uint16_t *) BIOS_EBDA_PTR) * 0x10; |
} |
/** @} |
/kernel/trunk/arch/ia32/src/pm.c |
---|
86,10 → 86,10 |
tss_t *tss_p = NULL; |
/* gdtr is changed by kmp before next CPU is initialized */ |
ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((__address) gdt) }; |
ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (__address) gdt }; |
ptr_16_32_t bootstrap_gdtr = { .limit = sizeof(gdt), .base = KA2PA((uintptr_t) gdt) }; |
ptr_16_32_t gdtr = { .limit = sizeof(gdt), .base = (uintptr_t) gdt }; |
void gdt_setbase(descriptor_t *d, __address base) |
void gdt_setbase(descriptor_t *d, uintptr_t base) |
{ |
d->base_0_15 = base & 0xffff; |
d->base_16_23 = ((base) >> 16) & 0xff; |
96,13 → 96,13 |
d->base_24_31 = ((base) >> 24) & 0xff; |
} |
void gdt_setlimit(descriptor_t *d, __u32 limit) |
void gdt_setlimit(descriptor_t *d, uint32_t limit) |
{ |
d->limit_0_15 = limit & 0xffff; |
d->limit_16_19 = (limit >> 16) & 0xf; |
} |
void idt_setoffset(idescriptor_t *d, __address offset) |
void idt_setoffset(idescriptor_t *d, uintptr_t offset) |
{ |
/* |
* Offset is a linear address. |
113,7 → 113,7 |
void tss_initialize(tss_t *t) |
{ |
memsetb((__address) t, sizeof(struct tss), 0); |
memsetb((uintptr_t) t, sizeof(struct tss), 0); |
} |
/* |
139,7 → 139,7 |
d->access |= DPL_USER; |
} |
idt_setoffset(d, ((__address) interrupt_handlers) + i*interrupt_handler_size); |
idt_setoffset(d, ((uintptr_t) interrupt_handlers) + i*interrupt_handler_size); |
exc_register(i, "undef", (iroutine) null_interrupt); |
} |
exc_register(13, "gp_fault", (iroutine) gp_fault); |
182,7 → 182,7 |
* Update addresses in GDT and IDT to their virtual counterparts. |
*/ |
idtr.limit = sizeof(idt); |
idtr.base = (__address) idt; |
idtr.base = (uintptr_t) idt; |
gdtr_load(&gdtr); |
idtr_load(&idtr); |
211,7 → 211,7 |
gdt_p[TSS_DES].special = 1; |
gdt_p[TSS_DES].granularity = 0; |
gdt_setbase(&gdt_p[TSS_DES], (__address) tss_p); |
gdt_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p); |
gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1); |
/* |
224,7 → 224,7 |
clean_AM_flag(); /* Disable alignment check */ |
} |
void set_tls_desc(__address tls) |
void set_tls_desc(uintptr_t tls) |
{ |
ptr_16_32_t cpugdtr; |
descriptor_t *gdt_p; |
/kernel/trunk/arch/ia32/src/smp/mps.c |
---|
55,11 → 55,11 |
#define FS_SIGNATURE 0x5f504d5f |
#define CT_SIGNATURE 0x504d4350 |
int mps_fs_check(__u8 *base); |
int mps_fs_check(uint8_t *base); |
int mps_ct_check(void); |
int configure_via_ct(void); |
int configure_via_default(__u8 n); |
int configure_via_default(uint8_t n); |
int ct_processor_entry(struct __processor_entry *pr); |
void ct_bus_entry(struct __bus_entry *bus); |
92,7 → 92,7 |
static count_t get_cpu_count(void); |
static bool is_cpu_enabled(index_t i); |
static bool is_bsp(index_t i); |
static __u8 get_cpu_apic_id(index_t i); |
static uint8_t get_cpu_apic_id(index_t i); |
static int mps_irq_to_pin(int irq); |
struct smp_config_operations mps_config_operations = { |
120,7 → 120,7 |
return processor_entries[i].cpu_flags & 0x2; |
} |
__u8 get_cpu_apic_id(index_t i) |
uint8_t get_cpu_apic_id(index_t i) |
{ |
ASSERT(i < processor_entry_cnt); |
return processor_entries[i].l_apic_id; |
130,10 → 130,10 |
/* |
* Used to check the integrity of the MP Floating Structure. |
*/ |
int mps_fs_check(__u8 *base) |
int mps_fs_check(uint8_t *base) |
{ |
int i; |
__u8 sum; |
uint8_t sum; |
for (i = 0, sum = 0; i < 16; i++) |
sum += base[i]; |
146,9 → 146,9 |
*/ |
int mps_ct_check(void) |
{ |
__u8 *base = (__u8 *) ct; |
__u8 *ext = base + ct->base_table_length; |
__u8 sum; |
uint8_t *base = (uint8_t *) ct; |
uint8_t *ext = base + ct->base_table_length; |
uint8_t sum; |
int i; |
/* count the checksum for the base table */ |
167,7 → 167,7 |
void mps_init(void) |
{ |
__u8 *addr[2] = { NULL, (__u8 *) PA2KA(0xf0000) }; |
uint8_t *addr[2] = { NULL, (uint8_t *) PA2KA(0xf0000) }; |
int i, j, length[2] = { 1024, 64*1024 }; |
178,10 → 178,10 |
* 2. search 64K starting at 0xf0000 |
*/ |
addr[0] = (__u8 *) PA2KA(ebda ? ebda : 639 * 1024); |
addr[0] = (uint8_t *) PA2KA(ebda ? ebda : 639 * 1024); |
for (i = 0; i < 2; i++) { |
for (j = 0; j < length[i]; j += 16) { |
if (*((__u32 *) &addr[i][j]) == FS_SIGNATURE && mps_fs_check(&addr[i][j])) { |
if (*((uint32_t *) &addr[i][j]) == FS_SIGNATURE && mps_fs_check(&addr[i][j])) { |
fs = (struct mps_fs *) &addr[i][j]; |
goto fs_found; |
} |
199,7 → 199,7 |
return; |
} |
ct = (struct mps_ct *)PA2KA((__address)fs->configuration_table); |
ct = (struct mps_ct *)PA2KA((uintptr_t)fs->configuration_table); |
config.cpu_count = configure_via_ct(); |
} |
else |
210,7 → 210,7 |
int configure_via_ct(void) |
{ |
__u8 *cur; |
uint8_t *cur; |
int i, cnt; |
if (ct->signature != CT_SIGNATURE) { |
226,7 → 226,7 |
return 1; |
} |
l_apic = (__u32 *)(__address)ct->l_apic; |
l_apic = (uint32_t *)(uintptr_t)ct->l_apic; |
cnt = 0; |
cur = &ct->base_table[0]; |
289,7 → 289,7 |
return cnt; |
} |
int configure_via_default(__u8 n) |
int configure_via_default(uint8_t n) |
{ |
/* |
* Not yet implemented. |
336,7 → 336,7 |
return; |
} |
io_apic = (__u32 *)(__address)ioa->io_apic; |
io_apic = (uint32_t *)(uintptr_t)ioa->io_apic; |
} |
//#define MPSCT_VERBOSE |
404,8 → 404,8 |
void ct_extended_entries(void) |
{ |
__u8 *ext = (__u8 *) ct + ct->base_table_length; |
__u8 *cur; |
uint8_t *ext = (uint8_t *) ct + ct->base_table_length; |
uint8_t *cur; |
for (cur = ext; cur < ext + ct->ext_table_length; cur += cur[CT_EXT_ENTRY_LEN]) { |
switch (cur[CT_EXT_ENTRY_TYPE]) { |
/kernel/trunk/arch/ia32/src/smp/smp.c |
---|
61,7 → 61,7 |
void smp_init(void) |
{ |
__address l_apic_address, io_apic_address; |
uintptr_t l_apic_address, io_apic_address; |
if (acpi_madt) { |
acpi_madt_parse(); |
72,22 → 72,22 |
ops = &mps_config_operations; |
} |
l_apic_address = (__address) frame_alloc(ONE_FRAME, FRAME_ATOMIC | FRAME_KA); |
l_apic_address = (uintptr_t) frame_alloc(ONE_FRAME, FRAME_ATOMIC | FRAME_KA); |
if (!l_apic_address) |
panic("cannot allocate address for l_apic\n"); |
io_apic_address = (__address) frame_alloc(ONE_FRAME, FRAME_ATOMIC | FRAME_KA); |
io_apic_address = (uintptr_t) frame_alloc(ONE_FRAME, FRAME_ATOMIC | FRAME_KA); |
if (!io_apic_address) |
panic("cannot allocate address for io_apic\n"); |
if (config.cpu_count > 1) { |
page_mapping_insert(AS_KERNEL, l_apic_address, (__address) l_apic, |
page_mapping_insert(AS_KERNEL, l_apic_address, (uintptr_t) l_apic, |
PAGE_NOT_CACHEABLE); |
page_mapping_insert(AS_KERNEL, io_apic_address, (__address) io_apic, |
page_mapping_insert(AS_KERNEL, io_apic_address, (uintptr_t) io_apic, |
PAGE_NOT_CACHEABLE); |
l_apic = (__u32 *) l_apic_address; |
io_apic = (__u32 *) io_apic_address; |
l_apic = (uint32_t *) l_apic_address; |
io_apic = (uint32_t *) io_apic_address; |
} |
} |
114,8 → 114,8 |
/* |
* Set the warm-reset vector to the real-mode address of 4K-aligned ap_boot() |
*/ |
*((__u16 *) (PA2KA(0x467+0))) = ((__address) ap_boot) >> 4; /* segment */ |
*((__u16 *) (PA2KA(0x467+2))) = 0; /* offset */ |
*((uint16_t *) (PA2KA(0x467+0))) = ((uintptr_t) ap_boot) >> 4; /* segment */ |
*((uint16_t *) (PA2KA(0x467+2))) = 0; /* offset */ |
/* |
* Save 0xa to address 0xf of the CMOS RAM. |
154,10 → 154,10 |
panic("couldn't allocate memory for GDT\n"); |
memcpy(gdt_new, gdt, GDT_ITEMS * sizeof(struct descriptor)); |
memsetb((__address)(&gdt_new[TSS_DES]), sizeof(struct descriptor), 0); |
memsetb((uintptr_t)(&gdt_new[TSS_DES]), sizeof(struct descriptor), 0); |
protected_ap_gdtr.limit = GDT_ITEMS * sizeof(struct descriptor); |
protected_ap_gdtr.base = KA2PA((__address) gdt_new); |
gdtr.base = (__address) gdt_new; |
protected_ap_gdtr.base = KA2PA((uintptr_t) gdt_new); |
gdtr.base = (uintptr_t) gdt_new; |
if (l_apic_send_init_ipi(ops->cpu_apic_id(i))) { |
/* |
/kernel/trunk/arch/ia32/src/smp/ap.S |
---|
69,7 → 69,7 |
movw %ax, %ds |
movw %ax, %es |
movw %ax, %ss |
movl $KA2PA(ctx), %eax # KA2PA((__address) &ctx) |
movl $KA2PA(ctx), %eax # KA2PA((uintptr_t) &ctx) |
movl (%eax), %esp |
subl $0x80000000, %esp # KA2PA(ctx.sp) |
/kernel/trunk/arch/ia32/src/smp/ipi.c |
---|
39,7 → 39,7 |
void ipi_broadcast_arch(int ipi) |
{ |
(void) l_apic_broadcast_custom_ipi((__u8) ipi); |
(void) l_apic_broadcast_custom_ipi((uint8_t) ipi); |
} |
#endif /* CONFIG_SMP */ |
/kernel/trunk/arch/ia32/src/smp/apic.c |
---|
67,10 → 67,10 |
* optimize the code too much and accesses to l_apic and io_apic, that must |
* always be 32-bit, would use byte oriented instructions. |
*/ |
volatile __u32 *l_apic = (__u32 *) 0xfee00000; |
volatile __u32 *io_apic = (__u32 *) 0xfec00000; |
volatile uint32_t *l_apic = (uint32_t *) 0xfee00000; |
volatile uint32_t *io_apic = (uint32_t *) 0xfec00000; |
__u32 apic_id_mask = 0; |
uint32_t apic_id_mask = 0; |
static int apic_poll_errors(void); |
218,7 → 218,7 |
* |
* @return 0 on failure, 1 on success. |
*/ |
int l_apic_broadcast_custom_ipi(__u8 vector) |
int l_apic_broadcast_custom_ipi(uint8_t vector) |
{ |
icr_t icr; |
248,7 → 248,7 |
* |
* @return 0 on failure, 1 on success. |
*/ |
int l_apic_send_init_ipi(__u8 apicid) |
int l_apic_send_init_ipi(uint8_t apicid) |
{ |
icr_t icr; |
int i; |
305,7 → 305,7 |
*/ |
for (i = 0; i<2; i++) { |
icr.lo = l_apic[ICRlo]; |
icr.vector = ((__address) ap_boot) / 4096; /* calculate the reset vector */ |
icr.vector = ((uintptr_t) ap_boot) / 4096; /* calculate the reset vector */ |
icr.delmod = DELMOD_STARTUP; |
icr.destmod = DESTMOD_PHYS; |
icr.level = LEVEL_ASSERT; |
331,7 → 331,7 |
lvt_tm_t tm; |
ldr_t ldr; |
dfr_t dfr; |
__u32 t1, t2; |
uint32_t t1, t2; |
/* Initialize LVT Error register. */ |
error.value = l_apic[LVT_Err]; |
455,7 → 455,7 |
* |
* @return Local APIC ID. |
*/ |
__u8 l_apic_id(void) |
uint8_t l_apic_id(void) |
{ |
l_apic_id_t idreg; |
469,7 → 469,7 |
* |
* @return Content of the addressed IO APIC register. |
*/ |
__u32 io_apic_read(__u8 address) |
uint32_t io_apic_read(uint8_t address) |
{ |
io_regsel_t regsel; |
484,7 → 484,7 |
* @param address IO APIC register address. |
* @param x Content to be written to the addressed IO APIC register. |
*/ |
void io_apic_write(__u8 address, __u32 x) |
void io_apic_write(uint8_t address, uint32_t x) |
{ |
io_regsel_t regsel; |
501,7 → 501,7 |
* @param v Interrupt vector to trigger. |
* @param flags Flags. |
*/ |
void io_apic_change_ioredtbl(int pin, int dest, __u8 v, int flags) |
void io_apic_change_ioredtbl(int pin, int dest, uint8_t v, int flags) |
{ |
io_redirection_reg_t reg; |
int dlvr = DELMOD_FIXED; |
527,7 → 527,7 |
* |
* @param irqmask Bitmask of IRQs to be masked (0 = do not mask, 1 = mask). |
*/ |
void io_apic_disable_irqs(__u16 irqmask) |
void io_apic_disable_irqs(uint16_t irqmask) |
{ |
io_redirection_reg_t reg; |
int i, pin; |
553,7 → 553,7 |
* |
* @param irqmask Bitmask of IRQs to be unmasked (0 = do not unmask, 1 = unmask). |
*/ |
void io_apic_enable_irqs(__u16 irqmask) |
void io_apic_enable_irqs(uint16_t irqmask) |
{ |
int i, pin; |
io_redirection_reg_t reg; |
/kernel/trunk/arch/ia32/src/ddi/ddi.c |
---|
56,7 → 56,7 |
* |
* @return 0 on success or an error code from errno.h. |
*/ |
int ddi_iospace_enable_arch(task_t *task, __address ioaddr, size_t size) |
int ddi_iospace_enable_arch(task_t *task, uintptr_t ioaddr, size_t size) |
{ |
count_t bits; |
66,13 → 66,13 |
if (task->arch.iomap.bits < bits) { |
bitmap_t oldiomap; |
__u8 *newmap; |
uint8_t *newmap; |
/* |
* The I/O permission bitmap is too small and needs to be grown. |
*/ |
newmap = (__u8 *) malloc(BITS2BYTES(bits), FRAME_ATOMIC); |
newmap = (uint8_t *) malloc(BITS2BYTES(bits), FRAME_ATOMIC); |
if (!newmap) |
return ENOMEM; |
/kernel/trunk/arch/ia32/src/proc/scheduler.c |
---|
58,7 → 58,7 |
*/ |
void before_thread_runs_arch(void) |
{ |
CPU->arch.tss->esp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
CPU->arch.tss->esp0 = (uintptr_t) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA]; |
CPU->arch.tss->ss0 = selector(KDATA_DES); |
/* Set up TLS in GS register */ |
/kernel/trunk/arch/ia32/src/mm/tlb.c |
---|
59,7 → 59,7 |
* @param page Address of the first page whose entry is to be invalidated. |
* @param cnt Number of entries to invalidate. |
*/ |
void tlb_invalidate_pages(asid_t asid, __address page, count_t cnt) |
void tlb_invalidate_pages(asid_t asid, uintptr_t page, count_t cnt) |
{ |
int i; |
/kernel/trunk/arch/ia32/src/mm/frame.c |
---|
51,7 → 51,7 |
size_t hardcoded_unmapped_ktext_size = 0; |
size_t hardcoded_unmapped_kdata_size = 0; |
__address last_frame = 0; |
uintptr_t last_frame = 0; |
static void init_e820_memory(pfn_t minconf) |
{ |
99,9 → 99,9 |
else |
name = "invalid"; |
printf("%.*p %#.16llXB %s\n", |
sizeof(__native) * 2, |
(__native) e820table[i].base_address, |
(__u64) e820table[i].size, |
sizeof(unative_t) * 2, |
(unative_t) e820table[i].base_address, |
(uint64_t) e820table[i].size, |
name); |
} |
return 0; |
/kernel/trunk/arch/ia32/src/mm/memory_init.c |
---|
37,9 → 37,9 |
#include <arch/mm/page.h> |
#include <print.h> |
__u8 e820counter = 0xff; |
uint8_t e820counter = 0xff; |
struct e820memmap_ e820table[MEMMAP_E820_MAX_RECORDS]; |
__u32 e801memorysize; |
uint32_t e801memorysize; |
size_t get_memory_size(void) |
{ |
48,7 → 48,7 |
void memory_print_map(void) |
{ |
__u8 i; |
uint8_t i; |
for (i=0;i<e820counter;i++) { |
printf("E820 base: %#.16llx size: %#.16llx type: ", e820table[i].base_address, e820table[i].size); |
/kernel/trunk/arch/ia32/src/mm/page.c |
---|
51,7 → 51,7 |
void page_arch_init(void) |
{ |
__address cur; |
uintptr_t cur; |
int flags; |
if (config.cpu_active == 1) { |
68,10 → 68,10 |
} |
exc_register(14, "page_fault", (iroutine) page_fault); |
write_cr3((__address) AS_KERNEL->page_table); |
write_cr3((uintptr_t) AS_KERNEL->page_table); |
} |
else { |
write_cr3((__address) AS_KERNEL->page_table); |
write_cr3((uintptr_t) AS_KERNEL->page_table); |
} |
paging_on(); |
78,12 → 78,12 |
} |
__address hw_map(__address physaddr, size_t size) |
uintptr_t hw_map(uintptr_t physaddr, size_t size) |
{ |
if (last_frame + ALIGN_UP(size, PAGE_SIZE) > KA2PA(KERNEL_ADDRESS_SPACE_END_ARCH)) |
panic("Unable to map physical memory %p (%d bytes)", physaddr, size) |
__address virtaddr = PA2KA(last_frame); |
uintptr_t virtaddr = PA2KA(last_frame); |
pfn_t i; |
for (i = 0; i < ADDR2PFN(ALIGN_UP(size, PAGE_SIZE)); i++) |
page_mapping_insert(AS_KERNEL, virtaddr + PFN2ADDR(i), physaddr + PFN2ADDR(i), PAGE_NOT_CACHEABLE); |
95,7 → 95,7 |
void page_fault(int n, istate_t *istate) |
{ |
__address page; |
uintptr_t page; |
pf_access_t access; |
page = read_cr2(); |
/kernel/trunk/arch/ia32/src/interrupt.c |
---|
56,8 → 56,8 |
* Interrupt and exception dispatching. |
*/ |
void (* disable_irqs_function)(__u16 irqmask) = NULL; |
void (* enable_irqs_function)(__u16 irqmask) = NULL; |
void (* disable_irqs_function)(uint16_t irqmask) = NULL; |
void (* enable_irqs_function)(uint16_t irqmask) = NULL; |
void (* eoi_function)(void) = NULL; |
void PRINT_INFO_ERRCODE(istate_t *istate) |
129,7 → 129,7 |
void simd_fp_exception(int n, istate_t *istate) |
{ |
__u32 mxcsr; |
uint32_t mxcsr; |
asm |
( |
"stmxcsr %0;\n" |
136,10 → 136,10 |
:"=m"(mxcsr) |
); |
fault_if_from_uspace(istate, "SIMD FP exception(19), MXCSR: %#zx", |
(__native)mxcsr); |
(unative_t)mxcsr); |
PRINT_INFO_ERRCODE(istate); |
printf("MXCSR: %#zx\n",(__native)(mxcsr)); |
printf("MXCSR: %#zx\n",(unative_t)(mxcsr)); |
panic("SIMD FP exception(19)\n"); |
} |
164,7 → 164,7 |
tlb_shootdown_ipi_recv(); |
} |
void trap_virtual_enable_irqs(__u16 irqmask) |
void trap_virtual_enable_irqs(uint16_t irqmask) |
{ |
if (enable_irqs_function) |
enable_irqs_function(irqmask); |
172,7 → 172,7 |
panic("no enable_irqs_function\n"); |
} |
void trap_virtual_disable_irqs(__u16 irqmask) |
void trap_virtual_disable_irqs(uint16_t irqmask) |
{ |
if (disable_irqs_function) |
disable_irqs_function(irqmask); |
197,7 → 197,7 |
/* Reregister irq to be IPC-ready */ |
void irq_ipc_bind_arch(__native irq) |
void irq_ipc_bind_arch(unative_t irq) |
{ |
if (irq == IRQ_CLK) |
return; |
/kernel/trunk/arch/ia32/src/drivers/vesa.c |
---|
50,11 → 50,11 |
#include <memstr.h> |
#include <bitops.h> |
__u32 vesa_ph_addr; |
__u16 vesa_width; |
__u16 vesa_height; |
__u16 vesa_bpp; |
__u16 vesa_scanline; |
uint32_t vesa_ph_addr; |
uint16_t vesa_width; |
uint16_t vesa_height; |
uint16_t vesa_bpp; |
uint16_t vesa_scanline; |
int vesa_present(void) |
{ |
/kernel/trunk/arch/ia32/src/drivers/i8259.c |
---|
89,9 → 89,9 |
pic_enable_irqs(1<<IRQ_PIC1); /* but enable pic1 */ |
} |
void pic_enable_irqs(__u16 irqmask) |
void pic_enable_irqs(uint16_t irqmask) |
{ |
__u8 x; |
uint8_t x; |
if (irqmask & 0xff) { |
x = inb(PIC_PIC0PORT2); |
103,9 → 103,9 |
} |
} |
void pic_disable_irqs(__u16 irqmask) |
void pic_disable_irqs(uint16_t irqmask) |
{ |
__u8 x; |
uint8_t x; |
if (irqmask & 0xff) { |
x = inb(PIC_PIC0PORT2); |
/kernel/trunk/arch/ia32/src/drivers/i8254.c |
---|
78,9 → 78,9 |
#define SHIFT 11 |
void i8254_calibrate_delay_loop(void) |
{ |
__u64 clk1, clk2; |
__u32 t1, t2, o1, o2; |
__u8 not_ok; |
uint64_t clk1, clk2; |
uint32_t t1, t2, o1, o2; |
uint8_t not_ok; |
/* |
/kernel/trunk/arch/ia32/src/drivers/ega.c |
---|
53,8 → 53,8 |
*/ |
SPINLOCK_INITIALIZE(egalock); |
static __u32 ega_cursor; |
static __u8 *videoram; |
static uint32_t ega_cursor; |
static uint8_t *videoram; |
static void ega_putchar(chardev_t *d, const char ch); |
67,9 → 67,9 |
void ega_init(void) |
{ |
__u8 hi, lo; |
uint8_t hi, lo; |
videoram = (__u8 *) hw_map(VIDEORAM, SCREEN * 2); |
videoram = (uint8_t *) hw_map(VIDEORAM, SCREEN * 2); |
outb(0x3d4, 0xe); |
hi = inb(0x3d5); |
outb(0x3d4, 0xf); |
104,7 → 104,7 |
return; |
memcpy((void *) videoram, (void *) (videoram + ROW * 2), (SCREEN - ROW) * 2); |
memsetw((__address) (videoram + (SCREEN - ROW) * 2), ROW, 0x0720); |
memsetw((uintptr_t) (videoram + (SCREEN - ROW) * 2), ROW, 0x0720); |
ega_cursor = ega_cursor - ROW; |
} |