Subversion Repositories: HelenOS-historic
Compare Revisions: Rev 1103 → Rev 1104
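
Summary of the change set: the atomic_t type and the plain atomic_set()/atomic_get() accessors move out of the per-architecture <arch/atomic.h> headers into a new generic <atomic.h> (with the typedef forward-declared in <typedefs.h>), all users switch their include from <arch/atomic.h> to <atomic.h>, and the atomic arithmetic helpers now uniformly return long instead of count_t or fixed-width unsigned types. A few small fixes ride along: an extended panic message in the ia64 TLB fault path, comment typo fixes (Leightweight -> Lightweight), and a missing newline after iret in the ia32 userspace trampoline.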

/kernel/trunk/test/synch/rwlock1/test.c
28,7 → 28,7
 
#include <test.h>
#include <arch.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <print.h>
#include <proc/thread.h>
 
/kernel/trunk/test/synch/rwlock2/test.c
28,7 → 28,7
 
#include <test.h>
#include <arch.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <print.h>
#include <proc/thread.h>
 
/kernel/trunk/test/synch/rwlock3/test.c
28,7 → 28,7
 
#include <test.h>
#include <arch.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <print.h>
#include <proc/thread.h>
 
/kernel/trunk/test/synch/semaphore1/test.c
28,7 → 28,7
 
#include <test.h>
#include <arch.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <print.h>
#include <proc/thread.h>
 
/kernel/trunk/test/synch/rwlock4/test.c
28,7 → 28,7
 
#include <test.h>
#include <arch.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <print.h>
#include <proc/thread.h>
#include <arch/types.h>
/kernel/trunk/test/synch/semaphore2/test.c
28,7 → 28,7
 
#include <test.h>
#include <arch.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <print.h>
#include <proc/thread.h>
#include <arch/types.h>
/kernel/trunk/test/synch/rwlock5/test.c
28,7 → 28,7
 
#include <test.h>
#include <arch.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <print.h>
#include <proc/thread.h>
 
/kernel/trunk/test/debug/mips1/test.c
31,7 → 31,7
#include <panic.h>
 
#include <test.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <proc/thread.h>
#include <time/delay.h>
 
/kernel/trunk/test/thread/thread1/test.c
32,7 → 32,7
#include <panic.h>
 
#include <test.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <proc/thread.h>
 
#include <arch.h>
/kernel/trunk/test/mm/falloc2/test.c
32,7 → 32,7
#include <mm/slab.h>
#include <arch/mm/page.h>
#include <arch/types.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <debug.h>
#include <proc/thread.h>
#include <memstr.h>
/kernel/trunk/test/fpu/mips1/test.c
31,7 → 31,7
#include <panic.h>
 
#include <test.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <proc/thread.h>
#include <time/delay.h>
 
/kernel/trunk/test/fpu/fpu1/test.c
32,7 → 32,7
#include <panic.h>
 
#include <test.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <proc/thread.h>
 
#include <arch.h>
/kernel/trunk/test/fpu/sse1/test.c
31,7 → 31,7
#include <panic.h>
 
#include <test.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <proc/thread.h>
#include <time/delay.h>
 
/kernel/trunk/test/fault/fault1/test.c
32,7 → 32,7
#include <panic.h>
 
#include <test.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <proc/thread.h>
 
#include <arch.h>
/kernel/trunk/test/atomic/atomic1/test.c
28,7 → 28,7
 
#include <test.h>
#include <print.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <debug.h>
 
void test(void)
/kernel/trunk/generic/include/func.h
31,7 → 31,7
 
#include <arch/types.h>
#include <typedefs.h>
#include <arch/atomic.h>
#include <atomic.h>
 
extern atomic_t haltstate;
 
/kernel/trunk/generic/include/proc/scheduler.h
32,7 → 32,7
#include <synch/spinlock.h>
#include <time/clock.h> /* HZ */
#include <typedefs.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <adt/list.h>
 
#define RQ_COUNT 16
/kernel/trunk/generic/include/synch/spinlock.h
32,7 → 32,7
#include <arch/types.h>
#include <typedefs.h>
#include <preemption.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <debug.h>
 
#ifdef CONFIG_SMP
/kernel/trunk/generic/include/atomic.h
0,0 → 1,49
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifndef __ATOMIC_H__
#define __ATOMIC_H__
 
struct atomic {
volatile long count;
};
 
#include <arch/atomic.h>
#include <typedefs.h>
 
static inline void atomic_set(atomic_t *val, long i)
{
val->count = i;
}
 
static inline long atomic_get(atomic_t *val)
{
return val->count;
}
 
#endif
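
The new generic header owns the counter type and the plain set/get accessors, while the atomic arithmetic still comes from <arch/atomic.h>, which it includes once the struct body is visible. A minimal usage sketch, assuming only these interfaces (the counter and function below are hypothetical, not part of the change set):

#include <atomic.h>

static atomic_t ready_count;	/* hypothetical example counter */

static void mark_ready(void)	/* hypothetical example function */
{
	atomic_set(&ready_count, 0);		/* plain volatile store, generic */
	atomic_inc(&ready_count);		/* atomic op, supplied per-arch */
	if (atomic_get(&ready_count) == 1) {
		/* single-threaded here, so this always holds */
	}
}
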
/kernel/trunk/generic/include/mm/slab.h
31,7 → 31,7
 
#include <adt/list.h>
#include <synch/spinlock.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <mm/frame.h>
 
/** Minimum size to be allocated by malloc */
/kernel/trunk/generic/include/typedefs.h
92,4 → 92,6
typedef struct btree_node btree_node_t;
typedef struct btree btree_t;
 
typedef struct atomic atomic_t;
 
#endif
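
The typedef added above deliberately names a struct that is incomplete at this point: that is already enough for prototypes and pointer parameters, so <arch/atomic.h> and other headers can use the atomic_t name via <typedefs.h> without including <atomic.h> itself, which would create an include cycle; code that dereferences the count field gets the struct body from <atomic.h>. A hedged illustration (the prototype is hypothetical):

typedef struct atomic atomic_t;			/* incomplete type: fine for pointers */

extern void watch_counter(atomic_t *val);	/* hypothetical: passes the pointer only */
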
/kernel/trunk/generic/src/console/console.c
36,7 → 36,7
#include <arch.h>
#include <func.h>
#include <print.h>
#include <arch/atomic.h>
#include <atomic.h>
 
#define BUFLEN 2048
static char debug_buffer[BUFLEN];
/kernel/trunk/generic/src/proc/scheduler.c
34,7 → 34,7
#include <mm/as.h>
#include <arch/asm.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <synch/spinlock.h>
#include <config.h>
#include <context.h>
/kernel/trunk/generic/src/proc/thread.c
49,7 → 49,7
#include <arch/interrupt.h>
#include <smp/ipi.h>
#include <arch/faddr.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
/kernel/trunk/generic/src/synch/spinlock.c
27,7 → 27,7
*/
 
#include <synch/spinlock.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <arch/barrier.h>
#include <arch.h>
#include <preemption.h>
/kernel/trunk/generic/src/mm/tlb.c
32,7 → 32,7
#include <smp/ipi.h>
#include <synch/spinlock.h>
#include <typedefs.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <arch/interrupt.h>
#include <config.h>
#include <arch.h>
/kernel/trunk/generic/src/time/clock.c
38,7 → 38,7
#include <print.h>
#include <arch.h>
#include <adt/list.h>
#include <arch/atomic.h>
#include <atomic.h>
#include <proc/thread.h>
 
/** Clock routine
/kernel/trunk/arch/sparc64/include/atomic.h
30,9 → 30,8
#define __sparc64_ATOMIC_H__
 
#include <arch/types.h>
#include <typedefs.h>
 
typedef struct { volatile __u64 count; } atomic_t;
 
/** Atomic add operation.
*
* Use atomic compare and swap operation to atomically add signed value.
42,7 → 41,7
*
* @return Value of the atomic variable as it existed before addition.
*/
static inline count_t atomic_add(atomic_t *val, int i)
static inline long atomic_add(atomic_t *val, int i)
{
__u64 a, b;
volatile __u64 x = (__u64) &val->count;
62,22 → 61,22
return a;
}
 
static inline count_t atomic_preinc(atomic_t *val)
static inline long atomic_preinc(atomic_t *val)
{
return atomic_add(val, 1) + 1;
}
 
static inline count_t atomic_postinc(atomic_t *val)
static inline long atomic_postinc(atomic_t *val)
{
return atomic_add(val, 1);
}
 
static inline count_t atomic_predec(atomic_t *val)
static inline long atomic_predec(atomic_t *val)
{
return atomic_add(val, -1) - 1;
}
 
static inline count_t atomic_postdec(atomic_t *val)
static inline long atomic_postdec(atomic_t *val)
{
return atomic_add(val, -1);
}
92,14 → 91,4
(void) atomic_add(val, -1);
}
 
static inline void atomic_set(atomic_t *val, __u64 i)
{
val->count = i;
}
 
static inline __u64 atomic_get(atomic_t *val)
{
return val->count;
}
 
#endif
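
Per its own comment, the sparc64 atomic_add above is a compare-and-swap retry loop: read the current value, try to swap in the sum, and retry if another CPU got there first. A hedged, portable C model of that loop (not HelenOS code), with a GCC __sync builtin standing in for the casx instruction:

static inline long cas_add(volatile long *count, int i)
{
	long old;

	do {
		old = *count;		/* read the current value */
	} while (__sync_val_compare_and_swap(count, old, old + i) != old);

	return old;	/* like atomic_add: value as it existed before the addition */
}
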
/kernel/trunk/arch/ia64/include/atomic.h
30,8 → 30,8
#define __ia64_ATOMIC_H__
 
#include <arch/types.h>
#include <typedefs.h>
 
typedef struct { volatile __u64 count; } atomic_t;
 
/** Atomic addition.
*
40,9 → 40,9
*
* @return Value before addition.
*/
static inline count_t atomic_add(atomic_t *val, int imm)
static inline long atomic_add(atomic_t *val, int imm)
{
count_t v;
long v;
 
__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm));
49,23 → 49,13
return v;
}
 
static inline void atomic_set(atomic_t *val, __u64 i)
{
val->count = i;
}
 
static inline __u32 atomic_get(atomic_t *val)
{
return val->count;
}
 
static inline void atomic_inc(atomic_t *val) { atomic_add(val, 1); }
static inline void atomic_dec(atomic_t *val) { atomic_add(val, -1); }
 
static inline count_t atomic_preinc(atomic_t *val) { return atomic_add(val, 1) + 1; }
static inline count_t atomic_predec(atomic_t *val) { return atomic_add(val, -1) - 1; }
static inline long atomic_preinc(atomic_t *val) { return atomic_add(val, 1) + 1; }
static inline long atomic_predec(atomic_t *val) { return atomic_add(val, -1) - 1; }
 
static inline count_t atomic_postinc(atomic_t *val) { return atomic_add(val, 1); }
static inline count_t atomic_postdec(atomic_t *val) { return atomic_add(val, -1); }
static inline long atomic_postinc(atomic_t *val) { return atomic_add(val, 1); }
static inline long atomic_postdec(atomic_t *val) { return atomic_add(val, -1); }
 
#endif
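
fetchadd8 hands back the value the memory held before the addition, and every pre/post variant above is derived from that single primitive by adjusting the private return value, which is why no second memory access is needed. A worked two-CPU example:

/*
 * val->count = 0; two CPUs run atomic_preinc(val) concurrently.
 *
 *   CPU A: fetchadd returns 0  ->  atomic_add() = 0  ->  preinc = 0 + 1 = 1
 *   CPU B: fetchadd returns 1  ->  atomic_add() = 1  ->  preinc = 1 + 1 = 2
 *
 * Each CPU applies the "+ 1" correction to its own private snapshot, so
 * the results are always {1, 2} in some order, never a duplicate.
 */
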
/kernel/trunk/arch/ia64/src/mm/tlb.c
438,7 → 438,7
*/
page_table_unlock(AS, true);
if (!as_page_fault(va)) {
panic("%s: va=%P, rid=%d\n", __FUNCTION__, istate->cr_ifa, rr.map.rid);
panic("%s: va=%P, rid=%d, iip=%P\n", __FUNCTION__, istate->cr_ifa, rr.map.rid, istate->cr_iip);
}
}
}
/kernel/trunk/arch/ppc32/include/atomic.h
30,9 → 30,8
#define __ppc32_ATOMIC_H__
 
#include <arch/types.h>
#include <typedefs.h>
 
typedef struct { volatile __u32 count; } atomic_t;
 
static inline void atomic_inc(atomic_t *val)
{
__u32 tmp;
63,38 → 62,28
: "cc");
}
 
static inline __u32 atomic_postinc(atomic_t *val)
static inline long atomic_postinc(atomic_t *val)
{
atomic_inc(val);
return val->count - 1;
}
 
static inline __u32 atomic_postdec(atomic_t *val)
static inline long atomic_postdec(atomic_t *val)
{
atomic_dec(val);
return val->count + 1;
}
 
static inline __u32 atomic_preinc(atomic_t *val)
static inline long atomic_preinc(atomic_t *val)
{
atomic_inc(val);
return val->count;
}
 
static inline __u32 atomic_predec(atomic_t *val)
static inline long atomic_predec(atomic_t *val)
{
atomic_dec(val);
return val->count;
}
 
static inline void atomic_set(atomic_t *val, __u32 i)
{
val->count = i;
}
 
static inline __u32 atomic_get(atomic_t *val)
{
return val->count;
}
 
#endif
/kernel/trunk/arch/amd64/include/atomic.h
32,19 → 32,8
#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <typedefs.h>
 
typedef struct { volatile __u64 count; } atomic_t;
 
static inline void atomic_set(atomic_t *val, __u64 i)
{
val->count = i;
}
 
static inline __u64 atomic_get(atomic_t *val)
{
return val->count;
}
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
__asm__ volatile ("lock incq %0\n" : "=m" (val->count));
61,9 → 50,9
#endif /* CONFIG_SMP */
}
 
static inline count_t atomic_postinc(atomic_t *val)
static inline long atomic_postinc(atomic_t *val)
{
count_t r;
long r;
 
__asm__ volatile (
"movq $1, %0\n"
74,9 → 63,9
return r;
}
 
static inline count_t atomic_postdec(atomic_t *val)
static inline long atomic_postdec(atomic_t *val)
{
count_t r;
long r;
__asm__ volatile (
"movq $-1, %0\n"
103,7 → 92,7
}
 
 
/** AMD64 specific fast spinlock */
/** amd64 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
__u64 tmp;
116,7 → 105,7
#endif
"mov %0, %1;"
"testq %1, %1;"
"jnz 0b;" /* Leightweight looping on locked spinlock */
"jnz 0b;" /* Lightweight looping on locked spinlock */
"incq %1;" /* now use the atomic operation */
"xchgq %0, %1;"
/kernel/trunk/arch/amd64/src/proc/scheduler.c
33,8 → 33,8
#include <arch/context.h> /* SP_DELTA */
#include <arch/asm.h>
#include <arch/debugger.h>
#include <print.h>
 
#include <print.h>
void before_thread_runs_arch(void)
{
CPU->arch.tss->rsp0 = (__address) &THREAD->kstack[THREAD_STACK_SIZE-SP_DELTA];
/kernel/trunk/arch/mips32/include/atomic.h
30,6 → 30,7
#define __mips32_ATOMIC_H__
 
#include <arch/types.h>
#include <typedefs.h>
 
#define atomic_inc(x) ((void) atomic_add(x, 1))
#define atomic_dec(x) ((void) atomic_add(x, -1))
40,8 → 41,6
#define atomic_preinc(x) atomic_add(x, 1)
#define atomic_predec(x) atomic_add(x, -1)
 
typedef struct { volatile __u32 count; } atomic_t;
 
/* Atomic addition of immediate value.
*
* @param val Memory location to which will be the immediate value added.
49,9 → 48,9
*
* @return Value after addition.
*/
static inline count_t atomic_add(atomic_t *val, int i)
static inline long atomic_add(atomic_t *val, int i)
{
count_t tmp, v;
long tmp, v;
 
__asm__ volatile (
"1:\n"
68,16 → 67,4
return v;
}
 
/* Reads/writes are atomic on mips for 4-bytes */
 
static inline void atomic_set(atomic_t *val, __u32 i)
{
val->count = i;
}
 
static inline __u32 atomic_get(atomic_t *val)
{
return val->count;
}
 
#endif
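
The mips32 atomic_add above is a retry loop conventionally built on the MIPS ll/sc pair: load-linked reads the counter, store-conditional writes the sum back only if nothing else touched the location in between, and the loop retries on failure. A hedged C model (plain C has no ll/sc primitive, so a CAS builtin stands in for the conditional store):

static inline long llsc_add_model(volatile long *count, int i)
{
	long old, sum;

	do {
		old = *count;		/* models ll: load-linked */
		sum = old + i;
	} while (!__sync_bool_compare_and_swap(count, old, sum));	/* models sc */

	return sum;	/* like the mips32 atomic_add: value after the addition */
}
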
/kernel/trunk/arch/ia32/include/atomic.h
32,19 → 32,8
#include <arch/types.h>
#include <arch/barrier.h>
#include <preemption.h>
#include <typedefs.h>
 
typedef struct { volatile __u32 count; } atomic_t;
 
static inline void atomic_set(atomic_t *val, __u32 i)
{
val->count = i;
}
 
static inline __u32 atomic_get(atomic_t *val)
{
return val->count;
}
 
static inline void atomic_inc(atomic_t *val) {
#ifdef CONFIG_SMP
__asm__ volatile ("lock incl %0\n" : "=m" (val->count));
61,9 → 50,9
#endif /* CONFIG_SMP */
}
 
static inline count_t atomic_postinc(atomic_t *val)
static inline long atomic_postinc(atomic_t *val)
{
count_t r;
long r;
 
__asm__ volatile (
"movl $1, %0\n"
74,9 → 63,9
return r;
}
 
static inline count_t atomic_postdec(atomic_t *val)
static inline long atomic_postdec(atomic_t *val)
{
count_t r;
long r;
__asm__ volatile (
"movl $-1, %0\n"
102,7 → 91,7
return v;
}
 
/** Ia32 specific fast spinlock */
/** ia32 specific fast spinlock */
static inline void atomic_lock_arch(atomic_t *val)
{
__u32 tmp;
115,7 → 104,7
#endif
"mov %0, %1;"
"testl %1, %1;"
"jnz 0b;" /* Leightweight looping on locked spinlock */
"jnz 0b;" /* Lightweight looping on locked spinlock */
"incl %1;" /* now use the atomic operation */
"xchgl %0, %1;"
/kernel/trunk/arch/ia32/src/userspace.c
61,7 → 61,7
"pushl %3\n"
"pushl %4\n"
"movl %5, %%eax\n"
"iret"
"iret\n"
:
: "i" (selector(UDATA_DES) | PL_USER), "r" (kernel_uarg->uspace_stack+THREAD_STACK_SIZE),
"r" (ipl), "i" (selector(UTEXT_DES) | PL_USER), "r" (kernel_uarg->uspace_entry),