Subversion Repositories HelenOS-historic

Compare Revisions

Rev 1103 → Rev 1104

/kernel/trunk/arch/sparc64/include/atomic.h
30,9 → 30,8
#define __sparc64_ATOMIC_H__
 
#include <arch/types.h>
#include <typedefs.h>
 
typedef struct { volatile __u64 count; } atomic_t;
 
/** Atomic add operation.
*
* Use atomic compare and swap operation to atomically add signed value.
42,7 → 41,7
*
* @return Value of the atomic variable as it existed before addition.
*/
-static inline count_t atomic_add(atomic_t *val, int i)
+static inline long atomic_add(atomic_t *val, int i)
{
__u64 a, b;
volatile __u64 x = (__u64) &val->count;
62,22 → 61,22
return a;
}
 
-static inline count_t atomic_preinc(atomic_t *val)
+static inline long atomic_preinc(atomic_t *val)
{
return atomic_add(val, 1) + 1;
}
 
-static inline count_t atomic_postinc(atomic_t *val)
+static inline long atomic_postinc(atomic_t *val)
{
return atomic_add(val, 1);
}
 
-static inline count_t atomic_predec(atomic_t *val)
+static inline long atomic_predec(atomic_t *val)
{
return atomic_add(val, -1) - 1;
}
 
-static inline count_t atomic_postdec(atomic_t *val)
+static inline long atomic_postdec(atomic_t *val)
{
return atomic_add(val, -1);
}
92,14 → 91,4
(void) atomic_add(val, -1);
}
 
-static inline void atomic_set(atomic_t *val, __u64 i)
-{
-val->count = i;
-}
-
-static inline __u64 atomic_get(atomic_t *val)
-{
-return val->count;
-}
 
#endif
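
The elided body of the sparc64 atomic_add() above implements the compare-and-swap retry loop its comment describes (built around casx). A minimal host-compilable sketch of that technique, using a GCC __sync builtin instead of the kernel's inline assembly; my_atomic_t and my_atomic_add are illustrative names, not HelenOS code:

    /* Illustration only: a CAS retry loop equivalent in spirit to the
     * sparc64 casx-based atomic_add(), written with a GCC builtin. */
    #include <stdint.h>

    typedef struct { volatile uint64_t count; } my_atomic_t;  /* stand-in for atomic_t */

    static inline long my_atomic_add(my_atomic_t *val, int i)
    {
        uint64_t old, new;

        do {
            old = val->count;            /* snapshot the current value */
            new = old + i;               /* value we would like to store */
            /* retry if another CPU changed count between the load and the CAS */
        } while (__sync_val_compare_and_swap(&val->count, old, new) != old);

        return (long) old;               /* value before addition, matching atomic_add() */
    }
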
/kernel/trunk/arch/ia64/include/atomic.h
30,8 → 30,8
#define __ia64_ATOMIC_H__
 
#include <arch/types.h>
#include <typedefs.h>
 
typedef struct { volatile __u64 count; } atomic_t;
 
/** Atomic addition.
*
40,9 → 40,9
*
* @return Value before addition.
*/
-static inline count_t atomic_add(atomic_t *val, int imm)
+static inline long atomic_add(atomic_t *val, int imm)
{
-count_t v;
+long v;
 
__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
49,23 → 49,13
return v;
}
 
-static inline void atomic_set(atomic_t *val, __u64 i)
-{
-val->count = i;
-}
-
-static inline __u32 atomic_get(atomic_t *val)
-{
-return val->count;
-}
 
static inline void atomic_inc(atomic_t *val) { atomic_add(val, 1); }
static inline void atomic_dec(atomic_t *val) { atomic_add(val, -1); }
 
-static inline count_t atomic_preinc(atomic_t *val) { return atomic_add(val, 1) + 1; }
-static inline count_t atomic_predec(atomic_t *val) { return atomic_add(val, -1) - 1; }
+static inline long atomic_preinc(atomic_t *val) { return atomic_add(val, 1) + 1; }
+static inline long atomic_predec(atomic_t *val) { return atomic_add(val, -1) - 1; }
 
-static inline count_t atomic_postinc(atomic_t *val) { return atomic_add(val, 1); }
-static inline count_t atomic_postdec(atomic_t *val) { return atomic_add(val, -1); }
+static inline long atomic_postinc(atomic_t *val) { return atomic_add(val, 1); }
+static inline long atomic_postdec(atomic_t *val) { return atomic_add(val, -1); }
 
#endif
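
fetchadd8 returns the value the variable held before the addition, which is why atomic_preinc()/atomic_predec() adjust the result by ±1 while the post variants return it unchanged. A short, deliberately non-atomic sketch of just that return-value convention (fetch_add is a stand-in, not kernel code):

    /* Shows only the old-value convention behind the pre/post wrappers;
     * fetch_add() here is plain C and not atomic. */
    #include <stdio.h>

    static long fetch_add(long *v, int imm)   /* returns the value before addition */
    {
        long old = *v;
        *v += imm;
        return old;
    }

    int main(void)
    {
        long counter = 5;

        printf("postinc: %ld\n", fetch_add(&counter, 1));        /* prints 5, counter is now 6 */
        printf("preinc:  %ld\n", fetch_add(&counter, 1) + 1);    /* prints 7, counter is now 7 */
        return 0;
    }
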
/kernel/trunk/arch/ia64/src/mm/tlb.c
438,7 → 438,7
*/
page_table_unlock(AS, true);
if (!as_page_fault(va)) {
-panic("%s: va=%P, rid=%d\n", __FUNCTION__, istate->cr_ifa, rr.map.rid);
+panic("%s: va=%P, rid=%d, iip=%P\n", __FUNCTION__, istate->cr_ifa, rr.map.rid, istate->cr_iip);
}
}
}
/kernel/trunk/arch/ppc32/include/atomic.h
30,9 → 30,8
#define __ppc32_ATOMIC_H__
 
#include <arch/types.h>
#include <typedefs.h>
 
typedef struct { volatile __u32 count; } atomic_t;
 
static inline void atomic_inc(atomic_t *val)
{
__u32 tmp;
63,38 → 62,28
: "cc");
}
 
-static inline __u32 atomic_postinc(atomic_t *val)
+static inline long atomic_postinc(atomic_t *val)
{
atomic_inc(val);
return val->count - 1;
}
 
-static inline __u32 atomic_postdec(atomic_t *val)
+static inline long atomic_postdec(atomic_t *val)
{
atomic_dec(val);
return val->count + 1;
}
 
-static inline __u32 atomic_preinc(atomic_t *val)
+static inline long atomic_preinc(atomic_t *val)
{
atomic_inc(val);
return val->count;
}
 
-static inline __u32 atomic_predec(atomic_t *val)
+static inline long atomic_predec(atomic_t *val)
{
atomic_dec(val);
return val->count;
}
 
-static inline void atomic_set(atomic_t *val, __u32 i)
-{
-val->count = i;
-}
-
-static inline __u32 atomic_get(atomic_t *val)
-{
-return val->count;
-}
 
#endif
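
As in the other atomic.h files in this changeset, atomic_set() and atomic_get() are deleted from the architecture header; presumably they are now provided once in a shared header that this excerpt does not show. A hedged sketch of what such a shared definition could look like, assuming the count field becomes a long to match the new return types:

    /* Assumption: these trivial accessors now live in one generic header
     * instead of being repeated per architecture. Sketch, not the real file. */
    typedef struct { volatile long count; } atomic_t;

    static inline void atomic_set(atomic_t *val, long i)
    {
        val->count = i;
    }

    static inline long atomic_get(atomic_t *val)
    {
        return val->count;
    }
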
/kernel/trunk/arch/amd64/include/atomic.h
32,19 → 32,8
#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <typedefs.h>
 
typedef struct { volatile __u64 count; } atomic_t;
 
-static inline void atomic_set(atomic_t *val, __u64 i)
-{
-val->count = i;
-}
-
-static inline __u64 atomic_get(atomic_t *val)
-{
-return val->count;
-}
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
__asm__ volatile ("lock incq %0\n" : "=m" (val->count));
61,9 → 50,9
#endif /* CONFIG_SMP */
}
 
-static inline count_t atomic_postinc(atomic_t *val)
+static inline long atomic_postinc(atomic_t *val)
{
-count_t r;
+long r;
 
__asm__ volatile (
"movq $1, %0\n"
74,9 → 63,9
return r;
}
 
-static inline count_t atomic_postdec(atomic_t *val)
+static inline long atomic_postdec(atomic_t *val)
{
-count_t r;
+long r;
__asm__ volatile (
"movq $-1, %0\n"
103,7 → 92,7
}
 
 
-/** AMD64 specific fast spinlock */
+/** amd64 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
__u64 tmp;
116,7 → 105,7
#endif
"mov %0, %1;"
"testq %1, %1;"
-"jnz 0b;" /* Leightweight looping on locked spinlock */
+"jnz 0b;" /* Lightweight looping on locked spinlock */
"incq %1;" /* now use the atomic operation */
"xchgq %0, %1;"
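
The truncated bodies of the amd64 atomic_postinc()/atomic_postdec() above load ±1 into a register and then, presumably, apply it with a locked xadd, which leaves the register holding the counter's previous value. A stand-alone sketch of that fetch-and-add pattern (my_postinc is an illustrative name, not the kernel's exact code):

    /* lock xadd adds the register to memory and returns the old memory
     * value in the register -- exactly what a post-increment needs. */
    static inline long my_postinc(volatile long *p)
    {
        long r = 1;

        __asm__ volatile (
            "lock xaddq %0, %1"
            : "+r" (r), "+m" (*p)
            :
            : "memory");

        return r;    /* value of *p before the increment */
    }
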
/kernel/trunk/arch/amd64/src/proc/scheduler.c
33,8 → 33,8
#include <arch/context.h> /* SP_DELTA */
#include <arch/asm.h>
#include <arch/debugger.h>
-#include <print.h>
 
+#include <print.h>
void before_thread_runs_arch(void)
{
CPU->arch.tss->rsp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
/kernel/trunk/arch/mips32/include/atomic.h
30,6 → 30,7
#define __mips32_ATOMIC_H__
 
#include <arch/types.h>
+#include <typedefs.h>
 
#define atomic_inc(x) ((void) atomic_add(x, 1))
#define atomic_dec(x) ((void) atomic_add(x, -1))
40,8 → 41,6
#define atomic_preinc(x) atomic_add(x, 1)
#define atomic_predec(x) atomic_add(x, -1)
 
-typedef struct { volatile __u32 count; } atomic_t;
 
/* Atomic addition of immediate value.
*
* @param val Memory location to which will be the immediate value added.
49,9 → 48,9
*
* @return Value after addition.
*/
-static inline count_t atomic_add(atomic_t *val, int i)
+static inline long atomic_add(atomic_t *val, int i)
{
-count_t tmp, v;
+long tmp, v;
 
__asm__ volatile (
"1:\n"
68,16 → 67,4
return v;
}
 
-/* Reads/writes are atomic on mips for 4-bytes */
-
-static inline void atomic_set(atomic_t *val, __u32 i)
-{
-val->count = i;
-}
-
-static inline __u32 atomic_get(atomic_t *val)
-{
-return val->count;
-}
 
#endif
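
The mips32 header builds the whole increment/decrement family as macros over atomic_add(), so callers only see the generic API. A purely hypothetical usage sketch of that API (refcount, acquire, release and destroy_object are illustrative names, not HelenOS code):

    #include <atomic.h>    /* assumed kernel header providing atomic_t and the macros above */

    static atomic_t refcount;

    static void destroy_object(void)
    {
        /* hypothetical cleanup of the refcounted object */
    }

    static void acquire(void)
    {
        atomic_inc(&refcount);                 /* expands to (void) atomic_add(&refcount, 1) */
    }

    static void release(void)
    {
        /* atomic_postdec() returns the value the counter held before the decrement */
        if (atomic_postdec(&refcount) == 1)
            destroy_object();                  /* the reference just dropped was the last one */
    }
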
/kernel/trunk/arch/ia32/include/atomic.h
32,19 → 32,8
#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <typedefs.h>
 
typedef struct { volatile __u32 count; } atomic_t;
 
-static inline void atomic_set(atomic_t *val, __u32 i)
-{
-val->count = i;
-}
-
-static inline __u32 atomic_get(atomic_t *val)
-{
-return val->count;
-}
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
__asm__ volatile ("lock incl %0\n" : "=m" (val->count));
61,9 → 50,9
#endif /* CONFIG_SMP */
}
 
-static inline count_t atomic_postinc(atomic_t *val)
+static inline long atomic_postinc(atomic_t *val)
{
-count_t r;
+long r;
 
__asm__ volatile (
"movl $1, %0\n"
74,9 → 63,9
return r;
}
 
-static inline count_t atomic_postdec(atomic_t *val)
+static inline long atomic_postdec(atomic_t *val)
{
-count_t r;
+long r;
__asm__ volatile (
"movl $-1, %0\n"
102,7 → 91,7
return v;
}
 
-/** Ia32 specific fast spinlock */
+/** ia32 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
__u32 tmp;
115,7 → 104,7
#endif
"mov %0, %1;"
"testl %1, %1;"
-"jnz 0b;" /* Leightweight looping on locked spinlock */
+"jnz 0b;" /* Lightweight looping on locked spinlock */
"incl %1;" /* now use the atomic operation */
"xchgl %0, %1;"
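
The ia32 atomic_lock_arch() above spins with plain loads while the lock is held (the "Lightweight looping" comment) and only then retries the atomic exchange, so a contended lock does not keep generating locked bus cycles. A hedged sketch of that test-and-test-and-set idea using GCC builtins rather than the kernel's inline assembly (my_atomic_t, my_lock and my_unlock are illustrative names):

    #include <stdint.h>

    typedef struct { volatile uint32_t count; } my_atomic_t;   /* stand-in for atomic_t */

    static inline void my_lock(my_atomic_t *val)
    {
        do {
            /* cheap read-only spin while somebody else holds the lock */
            while (val->count)
                ;
            /* __sync_lock_test_and_set() returns the previous value: 0 means we got it */
        } while (__sync_lock_test_and_set(&val->count, 1));
    }

    static inline void my_unlock(my_atomic_t *val)
    {
        __sync_lock_release(&val->count);      /* store 0 with release semantics */
    }
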
/kernel/trunk/arch/ia32/src/userspace.c
61,7 → 61,7
"pushl %3\n"
"pushl %4\n"
"movl %5, %%eax\n"
-"iret"
+"iret\n"
:
: "i" (selector(UDATA_DES) | PL_USER), "r" (kernel_uarg->uspace_stack+THREAD_STACK_SIZE),
"r" (ipl), "i" (selector(UTEXT_DES) | PL_USER), "r" (kernel_uarg->uspace_entry),