Subversion Repositories HelenOS

Compare Revisions

Rev 1093 → Rev 1100

/kernel/trunk/generic/include/synch/spinlock.h
33,6 → 33,7
#include <typedefs.h>
#include <preemption.h>
#include <arch/atomic.h>
#include <debug.h>
 
#ifdef CONFIG_SMP
struct spinlock {
66,12 → 67,36
#endif
 
extern void spinlock_initialize(spinlock_t *sl, char *name);
extern void spinlock_lock(spinlock_t *sl);
extern int spinlock_trylock(spinlock_t *sl);
extern void spinlock_unlock(spinlock_t *sl);
extern void spinlock_lock_debug(spinlock_t *sl);
 
#ifdef CONFIG_DEBUG_SPINLOCK
# define spinlock_lock(x) spinlock_lock_debug(x)
#else
# define spinlock_lock(x) atomic_lock_arch(&(x)->val)
#endif
 
/** Unlock spinlock
 *
 * Unlock spinlock.
 *
 * @param sl Pointer to spinlock_t structure.
 */
static inline void spinlock_unlock(spinlock_t *sl)
{
	ASSERT(atomic_get(&sl->val) != 0);

	/*
	 * Prevent critical section code from bleeding out this way down.
	 */
	CS_LEAVE_BARRIER();
	atomic_set(&sl->val, 0);
	preemption_enable();
}
 
#else
 
/* On UP systems, spinlocks are effectively left out. */
#define SPINLOCK_DECLARE(name)
#define SPINLOCK_INITIALIZE(name)
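
Taken together, these header changes make spinlock_lock() a compile-time macro that selects between the debug and the raw architecture path, and move spinlock_unlock() into the header as a static inline. A minimal caller sketch, assuming SPINLOCK_INITIALIZE defines the lock on SMP builds; example_lock and example() are hypothetical:

SPINLOCK_INITIALIZE(example_lock);

void example(void)
{
	/*
	 * Expands to spinlock_lock_debug(&example_lock) under
	 * CONFIG_DEBUG_SPINLOCK, otherwise to
	 * atomic_lock_arch(&example_lock.val).
	 */
	spinlock_lock(&example_lock);

	/* ... critical section ... */

	/* Static inline: barrier, release the lock, reenable preemption. */
	spinlock_unlock(&example_lock);
}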
/kernel/trunk/generic/include/syscall/syscall.h
55,6 → 55,8
typedef __native (*syshandler_t)();
 
extern syshandler_t syscall_table[SYSCALL_END];
extern __native syscall_handler(__native a1, __native a2, __native a3,
	__native a4, __native id);
 
#endif
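
Because syshandler_t is declared with an empty parameter list, handlers of differing arity can share one table; the dispatcher always passes four arguments. A hypothetical handler matching that call shape:

static __native sys_example(__native a1, __native a2, __native a3,
	__native a4)
{
	return a1 + a2;	/* placeholder work */
}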
 
/kernel/trunk/generic/src/synch/spinlock.c
51,7 → 51,6
#endif
}
 
#ifdef CONFIG_DEBUG_SPINLOCK
/** Lock spinlock
 *
 * Lock spinlock.
60,7 → 59,8
 *
 * @param sl Pointer to spinlock_t structure.
 */
void spinlock_lock(spinlock_t *sl)
#ifdef CONFIG_DEBUG_SPINLOCK
void spinlock_lock_debug(spinlock_t *sl)
{
	count_t i = 0;
	char *symbol;
87,32 → 87,7
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
 
}
 
#else
 
/** Lock spinlock
 *
 * Lock spinlock.
 *
 * @param sl Pointer to spinlock_t structure.
 */
void spinlock_lock(spinlock_t *sl)
{
	preemption_disable();

	/*
	 * Each architecture has its own efficient/recommended
	 * implementation of spinlock.
	 */
	spinlock_arch(&sl->val);

	/*
	 * Prevent critical section code from bleeding out this way up.
	 */
	CS_ENTER_BARRIER();
}
#endif
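
With the plain spinlock_lock() removed here, the non-debug path now reaches atomic_lock_arch() directly via the header macro. For orientation, a sketch of the shape such a primitive commonly takes (a test-and-test-and-set loop); the real versions are per-architecture and test_and_set() is a hypothetical stand-in:

static inline void atomic_lock_arch(atomic_t *val)
{
	do {
		/* Spin on plain reads to keep the bus quiet. */
		while (atomic_get(val))
			;
	} while (test_and_set(val));	/* hypothetical atomic acquire */
}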
 
/** Lock spinlock conditionally
143,23 → 118,4
	return rc;
}
 
/** Unlock spinlock
 *
 * Unlock spinlock.
 *
 * @param sl Pointer to spinlock_t structure.
 */
void spinlock_unlock(spinlock_t *sl)
{
	ASSERT(atomic_get(&sl->val) != 0);

	/*
	 * Prevent critical section code from bleeding out this way down.
	 */
	CS_LEAVE_BARRIER();
	atomic_set(&sl->val, 0);
	preemption_enable();
}
 
#endif
/kernel/trunk/generic/src/lib/func.c
54,7 → 54,7
	rundebugger = true;
}
#else
	atomic_set(haltstate, 1);
	atomic_set(&haltstate, 1);
#endif

	interrupts_disable();
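
The func.c hunk is a one-token fix: atomic_set() stores through a pointer to an atomic_t, so haltstate must be passed by address. A standalone sketch of the call shape; the atomic_t layout here is hypothetical (the kernel's is per-architecture):

typedef struct {
	volatile long count;	/* hypothetical layout */
} atomic_t;

static atomic_t haltstate;

static void atomic_set(atomic_t *val, long i)
{
	val->count = i;	/* store lands in the shared variable */
}

void halt_sketch(void)
{
	atomic_set(&haltstate, 1);	/* the & is mandatory */
}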
/kernel/trunk/generic/src/syscall/syscall.c
62,6 → 62,16
	return as_remap(AS, (__address) address, size, 0);
}
 
/** Dispatch system call */
__native syscall_handler(__native a1, __native a2, __native a3,
	__native a4, __native id)
{
	if (id < SYSCALL_END)
		return syscall_table[id](a1, a2, a3, a4);
	else
		panic("Undefined syscall %d", id);
}
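
A sketch of how architecture-level trap code might hand off to the new generic dispatcher; arch_syscall_entry() and its argument marshalling are hypothetical:

__native arch_syscall_entry(__native a1, __native a2, __native a3,
	__native a4, __native id)
{
	/* Range checking and table lookup are done generically. */
	return syscall_handler(a1, a2, a3, a4, id);
}

Note that, as written, an out-of-range id panics the kernel rather than failing the call.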
 
syshandler_t syscall_table[SYSCALL_END] = {
	sys_io,
	sys_thread_create,