Subversion Repositories HelenOS-historic

Compare Revisions

Rev 474 → Rev 475

/SPARTAN/trunk/test/synch/semaphore1/test.c
42,8 → 42,8
static semaphore_t sem;
 
static waitq_t can_start;
- static volatile int items_produced;
- static volatile int items_consumed;
+ static atomic_t items_produced;
+ static atomic_t items_consumed;
 
static void consumer(void *arg);
static void producer(void *arg);
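The hunk stops short of the thread bodies, so the following is a sketch under assumptions: with the counters now atomic_t, producer() would bump its counter through atomic_inc rather than a plain ++ on a volatile int. The body and the use of semaphore_down/semaphore_up and waitq_sleep are illustrative, taken only from the declarations above, not from this commit:

static void producer(void *arg)
{
	waitq_sleep(&can_start);	/* block until the test releases all threads at once */
	semaphore_down(&sem);
	atomic_inc(&items_produced);	/* previously items_produced++ on a volatile int */
	semaphore_up(&sem);
}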
/SPARTAN/trunk/test/synch/rwlock4/test.c
49,7 → 49,7
 
static waitq_t can_start;
 
- __u32 seed = 0xdeadbeaf;
+ __u32 seed = 0xdeadbeef;
 
static __u32 random(__u32 max);
 
/SPARTAN/trunk/test/synch/semaphore2/test.c
44,7 → 44,7
 
static waitq_t can_start;
 
- __u32 seed = 0xdeadbeaf;
+ __u32 seed = 0xdeadbeef;
 
static __u32 random(__u32 max);
 
/SPARTAN/trunk/test/synch/rwlock5/test.c
41,8 → 41,8
static rwlock_t rwlock;
 
static waitq_t can_start;
- static volatile int items_read;
- static volatile int items_written;
+ static atomic_t items_read;
+ static atomic_t items_written;
 
static void writer(void *arg);
static void reader(void *arg);
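Again a sketch under assumptions, not code from this revision: a reader() body combining the now-atomic counter with the declarations above. The rwlock_read_lock/rwlock_read_unlock names are assumed from the kernel's rwlock API:

static void reader(void *arg)
{
	waitq_sleep(&can_start);
	rwlock_read_lock(&rwlock);
	atomic_inc(&items_read);	/* safe even with many concurrent readers */
	rwlock_read_unlock(&rwlock);
}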
/SPARTAN/trunk/generic/include/proc/scheduler.h
32,6 → 32,7
#include <synch/spinlock.h>
#include <time/clock.h> /* HZ */
#include <typedefs.h>
+ #include <arch/atomic.h>
#include <list.h>
 
#define RQ_COUNT 16
43,7 → 44,7
int n; /**< Number of threads in rq_ready. */
};
 
- extern volatile count_t nrdy;
+ extern atomic_t nrdy;
extern void scheduler_init(void);
 
extern void scheduler_fpu_lazy_request(void);
/SPARTAN/trunk/generic/src/proc/scheduler.c
48,7 → 48,7
#include <print.h>
#include <debug.h>
 
- volatile count_t nrdy;
+ atomic_t nrdy;
 
 
/** Take actions before new thread runs
181,7 → 181,7
CPU->nrdy--;
spinlock_unlock(&CPU->lock);
 
- atomic_dec((int *) &nrdy);
+ atomic_dec(&nrdy);
r->n--;
 
/*
557,7 → 557,7
cpu->nrdy--;
spinlock_unlock(&cpu->lock);
 
- atomic_dec((int *)&nrdy);
+ atomic_dec(&nrdy);
 
r->n--;
list_remove(&t->rq_link);
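Dropping the (int *) casts is more than cosmetics. On the 64-bit ports below, atomic_t is a volatile __u64, so the old call told atomic_dec to treat an 8-byte counter as a 4-byte int. A minimal illustration of the mismatch (types copied from the sparc64/ia64 hunks, the comment is editorial):

typedef volatile __u64 atomic_t;	/* sparc64/ia64 definition from this revision */

atomic_t nrdy;

/* atomic_dec((int *) &nrdy) would update only four of the
 * eight bytes (which four depends on endianness); the typed
 * atomic_dec(&nrdy) operates on the whole word. */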
/SPARTAN/trunk/generic/src/proc/thread.c
136,7 → 136,7
r->n++;
spinlock_unlock(&r->lock);
 
- atomic_inc((int *) &nrdy);
+ atomic_inc(&nrdy);
avg = nrdy / config.cpu_active;
 
spinlock_lock(&cpu->lock);
/SPARTAN/trunk/arch/sparc64/include/atomic.h
29,16 → 29,20
#ifndef __sparc64_ATOMIC_H__
#define __sparc64_ATOMIC_H__
 
+ #include <arch/types.h>
+ 
+ typedef volatile __u64 atomic_t;
+ 
/*
* TODO: these are just placeholders for real implementations of atomic_inc and atomic_dec.
* WARNING: the following functions cause the code to be preemption-unsafe !!!
*/
 
- static inline atomic_inc(volatile int *val) {
+ static inline void atomic_inc(atomic_t *val) {
(*val)++;
}
 
- static inline atomic_dec(volatile int *val) {
+ static inline void atomic_dec(atomic_t *val) {
(*val)--;
}
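The TODO above still stands in this revision; one way the placeholders could later be made truly atomic on sparc64 is a compare-and-swap loop around casx. The sketch below is an assumption for illustration, not code from this commit:

/* Hypothetical: atomically increment *val with casx.
 * casx [addr], cmp, rd: if *addr == cmp then swap *addr and rd;
 * rd always receives the old *addr. */
static inline void atomic_inc_casx(atomic_t *val)
{
	__u64 old, nv;

	do {
		old = *val;
		nv = old + 1;
		__asm__ volatile (
			"casx [%1], %2, %0\n"
			: "+r" (nv)
			: "r" (val), "r" (old)
			: "memory"
		);
	} while (nv != old);	/* another CPU raced us: retry */
}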
 
/SPARTAN/trunk/arch/ia64/include/atomic.h
29,17 → 29,23
#ifndef __ia64_ATOMIC_H__
#define __ia64_ATOMIC_H__
 
- /*
- * TODO: these are just placeholders for real implementations of atomic_inc and atomic_dec.
- * WARNING: the following functions cause the code to be preemption-unsafe !!!
- */
+ #include <arch/types.h>
 
- static inline atomic_inc(volatile int *val) {
- *val++;
- }
+ typedef volatile __u64 atomic_t;
 
- static inline atomic_dec(volatile int *val) {
- *val--;
- }
+ static inline atomic_t atomic_add(atomic_t *val, int imm)
+ {
+ atomic_t v;
+ 
+ /*
+ * __asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "=m" (val) : "i" (imm));
+ */
+ v = *val; /* fetchadd semantics: return the value before the add */
+ *val += imm;
+ return v;
+ }
 
+ static inline atomic_t atomic_inc(atomic_t *val) { return atomic_add(val, 1); }
+ static inline atomic_t atomic_dec(atomic_t *val) { return atomic_add(val, -1); }
 
#endif
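The commented-out fetchadd8 is the eventual goal; fetchadd returns the value the location held before the addition, which is why atomic_add is typed to return atomic_t. A hedged usage sketch (all names invented for illustration):

atomic_t pending = 0;

void submit_work(void)
{
	/* atomic_inc returns the counter as it was before this increment */
	if (atomic_inc(&pending) == 0) {
		/* we were the first submitter (illustrative branch) */
	}
}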
/SPARTAN/trunk/arch/ppc32/include/atomic.h
29,16 → 29,20
#ifndef __ppc32_ATOMIC_H__
#define __ppc32_ATOMIC_H__
 
+ #include <arch/types.h>
+ 
+ typedef volatile __u32 atomic_t;
+ 
/*
* TODO: these are just placeholders for real implementations of atomic_inc and atomic_dec.
* WARNING: the following functions cause the code to be preemption-unsafe !!!
*/
 
- static inline atomic_inc(volatile int *val) {
+ static inline void atomic_inc(atomic_t *val) {
(*val)++;
}
 
- static inline atomic_dec(volatile int *val) {
+ static inline void atomic_dec(atomic_t *val) {
(*val)--;
}
 
/SPARTAN/trunk/arch/mips32/include/atomic.h
29,9 → 29,13
#ifndef __mips32_ATOMIC_H__
#define __mips32_ATOMIC_H__
 
+ #include <arch/types.h>
+ 
#define atomic_inc(x) (a_add(x,1))
#define atomic_dec(x) (a_sub(x,1))
 
+ typedef volatile __u32 atomic_t;
+ 
/*
* Atomic addition
*
41,9 → 45,9
* of the variable to a special register and if another process writes to
* the same location, the SC (store-conditional) instruction fails.
*/
- static inline int a_add( volatile int *val, int i)
+ static inline atomic_t a_add(atomic_t *val, int i)
{
- int tmp, tmp2;
+ atomic_t tmp, tmp2;
 
asm volatile (
" .set push\n"
69,10 → 73,10
*
* Implemented in the same manner as a_add, except we subtract the value.
*/
- static inline int a_sub( volatile int *val, int i)
+ static inline atomic_t a_sub(atomic_t *val, int i)
 
{
- int tmp, tmp2;
+ atomic_t tmp, tmp2;
 
asm volatile (
" .set push\n"
/SPARTAN/trunk/arch/ia32/include/atomic.h
31,7 → 31,9
 
#include <arch/types.h>
 
- static inline void atomic_inc(volatile int *val) {
+ typedef volatile __u32 atomic_t;
+ 
+ static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
__asm__ volatile ("lock incl %0\n" : "=m" (*val));
#else
39,7 → 41,7
#endif /* CONFIG_SMP */
}
 
- static inline void atomic_dec(volatile int *val) {
+ static inline void atomic_dec(atomic_t *val) {
#ifdef CONFIG_SMP
__asm__ volatile ("lock decl %0\n" : "=m" (*val));
#else
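The view ends mid-hunk here; the #else branch presumably carries the uniprocessor variant. With the typedef in place callers pass a properly typed pointer, and the lock prefix is emitted only under CONFIG_SMP, where another CPU could otherwise interleave with the read-modify-write. A small usage sketch (the function and counter are illustrative, not from this commit):

static atomic_t interrupts_seen;	/* illustrative counter */

void on_timer_interrupt(void)
{
	/* "lock incl" on SMP builds, plain "incl" on uniprocessor */
	atomic_inc(&interrupts_seen);
}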