Subversion Repositories HelenOS

Compare Revisions

Ignore whitespace Rev 2081 → Rev 2082

/trunk/kernel/arch/ia64/include/atomic.h
46,7 → 46,7
{
long v;
 
__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
asm volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
return v;
}
/trunk/kernel/arch/ia64/include/asm.h
49,7 → 49,7
{
uint64_t v;
 
__asm__ volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1)));
asm volatile ("and %0 = %1, r12" : "=r" (v) : "r" (~(STACK_SIZE-1)));
return v;
}
62,7 → 62,7
{
uint64_t v;
__asm__ volatile ("mov %0 = psr\n" : "=r" (v));
asm volatile ("mov %0 = psr\n" : "=r" (v));
return v;
}
75,7 → 75,7
{
uint64_t v;
__asm__ volatile ("mov %0 = cr.iva\n" : "=r" (v));
asm volatile ("mov %0 = cr.iva\n" : "=r" (v));
return v;
}
86,7 → 86,7
*/
/** Write IVA (cr.iva) register — base address of the interruption vector table.
 *
 * @param v New value for cr.iva.
 */
static inline void iva_write(uint64_t v)
{
	/* Diff-extraction artifact removed: the pre-revision __asm__ line
	 * duplicated this statement, writing cr.iva twice. */
	asm volatile ("mov cr.iva = %0\n" : : "r" (v));
}
 
 
98,7 → 98,7
{
uint64_t v;
__asm__ volatile ("mov %0 = cr.ivr\n" : "=r" (v));
asm volatile ("mov %0 = cr.ivr\n" : "=r" (v));
return v;
}
109,7 → 109,7
*/
/** Write ITC (Interval Timer Counter, ar.itc) register.
 *
 * @param v New counter value.
 */
static inline void itc_write(uint64_t v)
{
	/* Diff-extraction artifact removed: the pre-revision __asm__ line
	 * duplicated this statement, writing ar.itc twice. */
	asm volatile ("mov ar.itc = %0\n" : : "r" (v));
}
 
/** Read ITC (Interval Timer Counter) register.
120,7 → 120,7
{
uint64_t v;
__asm__ volatile ("mov %0 = ar.itc\n" : "=r" (v));
asm volatile ("mov %0 = ar.itc\n" : "=r" (v));
return v;
}
131,7 → 131,7
*/
/** Write ITM (Interval Timer Match, cr.itm) register.
 *
 * @param v New match value; a timer interrupt fires when ITC reaches it.
 */
static inline void itm_write(uint64_t v)
{
	/* Diff-extraction artifact removed: the pre-revision __asm__ line
	 * duplicated this statement, writing cr.itm twice. */
	asm volatile ("mov cr.itm = %0\n" : : "r" (v));
}
 
/** Read ITM (Interval Timer Match) register.
142,7 → 142,7
{
uint64_t v;
__asm__ volatile ("mov %0 = cr.itm\n" : "=r" (v));
asm volatile ("mov %0 = cr.itm\n" : "=r" (v));
return v;
}
155,7 → 155,7
{
uint64_t v;
__asm__ volatile ("mov %0 = cr.itv\n" : "=r" (v));
asm volatile ("mov %0 = cr.itv\n" : "=r" (v));
return v;
}
166,7 → 166,7
*/
/** Write ITV (cr.itv) register — interval timer interrupt vector/control.
 *
 * @param v New value for cr.itv.
 */
static inline void itv_write(uint64_t v)
{
	/* Diff-extraction artifact removed: the pre-revision __asm__ line
	 * duplicated this statement, writing cr.itv twice. */
	asm volatile ("mov cr.itv = %0\n" : : "r" (v));
}
 
/** Write EOI (End Of Interrupt) register.
175,7 → 175,7
*/
/** Write EOI (End Of Interrupt, cr.eoi) register.
 *
 * @param v Value to store (the store itself signals end-of-interrupt).
 */
static inline void eoi_write(uint64_t v)
{
	/* Diff-extraction artifact removed: the pre-revision __asm__ line
	 * duplicated this statement — EOI must be signalled only once. */
	asm volatile ("mov cr.eoi = %0\n" : : "r" (v));
}
 
/** Read TPR (Task Priority Register).
186,7 → 186,7
{
uint64_t v;
 
__asm__ volatile ("mov %0 = cr.tpr\n" : "=r" (v));
asm volatile ("mov %0 = cr.tpr\n" : "=r" (v));
return v;
}
197,7 → 197,7
*/
/** Write TPR (Task Priority Register, cr.tpr).
 *
 * @param v New task priority value.
 */
static inline void tpr_write(uint64_t v)
{
	/* Diff-extraction artifact removed: the pre-revision __asm__ line
	 * duplicated this statement, writing cr.tpr twice. */
	asm volatile ("mov cr.tpr = %0\n" : : "r" (v));
}
 
/** Disable interrupts.
211,7 → 211,7
{
uint64_t v;
__asm__ volatile (
asm volatile (
"mov %0 = psr\n"
"rsm %1\n"
: "=r" (v)
232,7 → 232,7
{
uint64_t v;
__asm__ volatile (
asm volatile (
"mov %0 = psr\n"
"ssm %1\n"
";;\n"
270,7 → 270,7
/** Disable protection key checking. */
/** Disable protection key checking.
 *
 * Clears the PK bit in PSR via the rsm instruction; PSR_PK_MASK is an
 * immediate operand, so it must be a compile-time constant.
 */
static inline void pk_disable(void)
{
	/* Diff-extraction artifact removed: the pre-revision __asm__ line
	 * duplicated this rsm. */
	asm volatile ("rsm %0\n" : : "i" (PSR_PK_MASK));
}
 
extern void cpu_halt(void);
/trunk/kernel/arch/ia64/include/mm/page.h
194,7 → 194,7
{
uint64_t ret;
 
__asm__ volatile ("thash %0 = %1\n" : "=r" (ret) : "r" (va));
asm volatile ("thash %0 = %1\n" : "=r" (ret) : "r" (va));
 
return ret;
}
212,7 → 212,7
{
uint64_t ret;
 
__asm__ volatile ("ttag %0 = %1\n" : "=r" (ret) : "r" (va));
asm volatile ("ttag %0 = %1\n" : "=r" (ret) : "r" (va));
 
return ret;
}
227,7 → 227,7
{
uint64_t ret;
ASSERT(i < REGION_REGISTERS);
__asm__ volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT));
asm volatile ("mov %0 = rr[%1]\n" : "=r" (ret) : "r" (i << VRN_SHIFT));
return ret;
}
 
239,7 → 239,7
static inline void rr_write(index_t i, uint64_t v)
{
ASSERT(i < REGION_REGISTERS);
__asm__ volatile (
asm volatile (
"mov rr[%0] = %1\n"
:
: "r" (i << VRN_SHIFT), "r" (v)
254,7 → 254,7
{
uint64_t ret;
__asm__ volatile ("mov %0 = cr.pta\n" : "=r" (ret));
asm volatile ("mov %0 = cr.pta\n" : "=r" (ret));
return ret;
}
265,7 → 265,7
*/
/** Write PTA (cr.pta) register — VHPT base/control register.
 *
 * @param v New value for cr.pta.
 */
static inline void pta_write(uint64_t v)
{
	/* Diff-extraction artifact removed: the pre-revision __asm__ line
	 * duplicated this statement, writing cr.pta twice. */
	asm volatile ("mov cr.pta = %0\n" : : "r" (v));
}
 
extern void page_arch_init(void);
/trunk/kernel/arch/ia64/include/barrier.h
41,12 → 41,12
#define CS_ENTER_BARRIER() memory_barrier()
#define CS_LEAVE_BARRIER() memory_barrier()
 
#define memory_barrier() __asm__ volatile ("mf\n" ::: "memory")
#define memory_barrier() asm volatile ("mf\n" ::: "memory")
#define read_barrier() memory_barrier()
#define write_barrier() memory_barrier()
 
#define srlz_i() __asm__ volatile (";; srlz.i ;;\n" ::: "memory")
#define srlz_d() __asm__ volatile (";; srlz.d\n" ::: "memory")
#define srlz_i() asm volatile (";; srlz.i ;;\n" ::: "memory")
#define srlz_d() asm volatile (";; srlz.d\n" ::: "memory")
 
#endif
 
/trunk/kernel/arch/ia64/include/cpu.h
58,7 → 58,7
{
uint64_t v;
__asm__ volatile ("mov %0 = cpuid[%1]\n" : "=r" (v) : "r" (n));
asm volatile ("mov %0 = cpuid[%1]\n" : "=r" (v) : "r" (n));
return v;
}
/trunk/kernel/arch/ia64/src/ia64.c
133,7 → 133,7
psr.ri = 0; /* start with instruction #0 */
psr.bn = 1; /* start in bank 0 */
 
__asm__ volatile ("mov %0 = ar.rsc\n" : "=r" (rsc.value));
asm volatile ("mov %0 = ar.rsc\n" : "=r" (rsc.value));
rsc.loadrs = 0;
rsc.be = false;
rsc.pl = PL_USER;
/trunk/kernel/arch/ia64/src/ski/ski.c
69,7 → 69,7
*/
void ski_putchar(chardev_t *d, const char ch)
{
__asm__ volatile (
asm volatile (
"mov r15 = %0\n"
"mov r32 = %1\n" /* r32 is in0 */
"break 0x80000\n" /* modifies r8 */
95,7 → 95,7
{
uint64_t ch;
__asm__ volatile (
asm volatile (
"mov r15 = %1\n"
"break 0x80000;;\n" /* modifies r8 */
"mov %0 = r8;;\n"
204,7 → 204,7
*/
void ski_init_console(void)
{
__asm__ volatile (
asm volatile (
"mov r15 = %0\n"
"break 0x80000\n"
:
/trunk/kernel/arch/ia64/src/proc/scheduler.c
73,7 → 73,7
* Record address of kernel stack to bank 0 r23.
* These values will be found there after switch from userspace.
*/
__asm__ volatile (
asm volatile (
"bsw.0\n"
"mov r22 = %0\n"
"mov r23 = %1\n"
/trunk/kernel/arch/ia64/src/mm/tlb.c
72,7 → 72,7
 
for(i = 0; i < count1; i++) {
for(j = 0; j < count2; j++) {
__asm__ volatile (
asm volatile (
"ptc.e %0 ;;"
:
: "r" (adr)
179,7 → 179,7
}
/*cnt+=(page!=va);*/
for(; va<(page+cnt*(PAGE_SIZE)); va += (1<<ps)) {
__asm__ volatile (
asm volatile (
"ptc.l %0,%1;;"
:
: "r" (va), "r" (ps<<2)
244,7 → 244,7
srlz_i();
}
__asm__ volatile (
asm volatile (
"mov r8=psr;;\n"
"rsm %0;;\n" /* PSR_IC_MASK */
"srlz.d;;\n"
320,7 → 320,7
srlz_i();
}
 
__asm__ volatile (
asm volatile (
"mov r8=psr;;\n"
"rsm %0;;\n" /* PSR_IC_MASK */
"srlz.d;;\n"
382,7 → 382,7
*/
/** Purge data translation for a page range via the ptr.d instruction.
 *
 * @param page  Virtual address identifying the translation to purge.
 * @param width Page-size exponent; shifted left by 2 to form the ps field
 *              expected by ptr.d.
 */
void dtr_purge(uintptr_t page, count_t width)
{
	/* Diff-extraction artifact removed: the pre-revision __asm__ line
	 * duplicated this purge. */
	asm volatile ("ptr.d %0, %1\n" : : "r" (page), "r" (width << 2));
}