Subversion Repositories HelenOS-historic

Compare Revisions

Rev 622 → Rev 623

/kernel/trunk/test/synch/rwlock4/test.c
39,6 → 39,7
#include <synch/waitq.h>
#include <synch/rwlock.h>
#include <synch/synch.h>
+#include <synch/spinlock.h>
 
#define READERS 50
#define WRITERS 50
45,7 → 46,7
 
static rwlock_t rwlock;
 
-static spinlock_t lock;
+SPINLOCK_INITIALIZE(lock);
 
static waitq_t can_start;
 
/kernel/trunk/test/synch/semaphore2/test.c
37,10 → 37,11
#include <synch/waitq.h>
#include <synch/semaphore.h>
#include <synch/synch.h>
+#include <synch/spinlock.h>
 
static semaphore_t sem;
 
-static spinlock_t lock;
+SPINLOCK_INITIALIZE(lock);
 
static waitq_t can_start;
 
/kernel/trunk/test/thread/thread1/test.c
37,11 → 37,8
 
#include <arch.h>
 
 
 
#define THREADS 5
 
 
static void thread(void *data)
{
while(1)
51,21 → 48,15
}
}
 
 
 
void test(void)
{
thread_t *t;
int i;
 
 
 
-for (i=0; i<THREADS; i++)
-{
+for (i=0; i<THREADS; i++) {
if (!(t = thread_create(thread, NULL, TASK, 0)))
panic("could not create thread\n");
thread_ready(t);
}
printf("ok\n");
}
/kernel/trunk/generic/include/time/timeout.h
39,7 → 39,7
typedef void (* timeout_handler_t)(void *arg);
 
struct timeout {
-spinlock_t lock;
+SPINLOCK_DECLARE(lock);
 
link_t link; /**< Link to the list of active timeouts on THE->cpu */
/kernel/trunk/generic/include/proc/scheduler.h
40,7 → 40,7
 
/** Scheduler run queue structure. */
struct runq {
-spinlock_t lock;
+SPINLOCK_DECLARE(lock);
link_t rq_head; /**< List of ready threads. */
count_t n; /**< Number of threads in rq_ready. */
};
/kernel/trunk/generic/include/proc/task.h
34,7 → 34,7
#include <list.h>
 
struct task {
-spinlock_t lock;
+SPINLOCK_DECLARE(lock);
link_t th_head; /**< List of threads contained in this task. */
link_t tasks_link; /**< Link to other tasks within the system. */
vm_t *vm;
/kernel/trunk/generic/include/proc/thread.h
70,7 → 70,7
* Must be acquired before T.lock for each T of type task_t.
*
*/
-spinlock_t lock;
+SPINLOCK_DECLARE(lock);
 
void (* thread_code)(void *); /**< Function implementing the thread. */
void *thread_arg; /**< Argument passed to thread_code() function. */
/kernel/trunk/generic/include/cpu.h
42,7 → 42,7
#define CPU_STACK_SIZE STACK_SIZE
 
struct cpu {
-spinlock_t lock;
+SPINLOCK_DECLARE(lock);
context_t saved_context;
 
volatile count_t nrdy;
49,7 → 49,7
runq_t rq[RQ_COUNT];
volatile count_t needs_relink;
 
-spinlock_t timeoutlock;
+SPINLOCK_DECLARE(timeoutlock);
link_t timeout_active_head;
 
#ifdef CONFIG_SMP
/kernel/trunk/generic/include/synch/rwlock.h
33,6 → 33,7
#include <typedefs.h>
#include <synch/mutex.h>
#include <synch/synch.h>
+#include <synch/spinlock.h>
 
enum rwlock_type {
RWLOCK_NONE,
41,7 → 42,7
};
 
struct rwlock {
-spinlock_t lock;
+SPINLOCK_DECLARE(lock);
mutex_t exclusive; /**< Mutex for writers, readers can bypass it if readers_in is positive. */
count_t readers_in; /**< Number of readers in critical section. */
};
/kernel/trunk/generic/include/synch/spinlock.h
41,6 → 41,29
int val;
};
 
+/*
+ * SPINLOCK_DECLARE is to be used for dynamically allocated spinlocks,
+ * where the lock gets initialized in run time.
+ */
+#define SPINLOCK_DECLARE(slname) spinlock_t slname
+
+/*
+ * SPINLOCK_INITIALIZE is to be used for statically allocated spinlocks.
+ * It declares and initializes the lock.
+ */
+#ifdef CONFIG_DEBUG_SPINLOCK
+#define SPINLOCK_INITIALIZE(slname) \
+	spinlock_t slname = { \
+		.name = #slname, \
+		.val = 0 \
+	}
+#else
+#define SPINLOCK_INITIALIZE(slname) \
+	spinlock_t slname = { \
+		.val = 0 \
+	}
+#endif
 
extern void spinlock_initialize(spinlock_t *sl, char *name);
extern void spinlock_lock(spinlock_t *sl);
extern int spinlock_trylock(spinlock_t *sl);
48,8 → 71,9
 
#else
 
-struct spinlock {
-};
+/* On UP systems, spinlocks are effectively left out. */
+#define SPINLOCK_DECLARE(name)
+#define SPINLOCK_INITIALIZE(name)
 
#define spinlock_initialize(x,name)
#define spinlock_lock(x) preemption_disable()
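
For orientation, a minimal sketch of how the two new macros are intended to be used together (struct example, example_lock and example_initialize are made-up names for illustration; the calls mirror the changes made elsewhere in this revision):

#include <synch/spinlock.h>

/* Statically allocated lock: declared and initialized at compile time. */
SPINLOCK_INITIALIZE(example_lock);

/* Lock embedded in a structure that is set up at run time. */
struct example {
	SPINLOCK_DECLARE(lock);
	int counter;	/* protected by lock */
};

static void example_initialize(struct example *e)
{
	/*
	 * Run-time initialization stays the same as before; on UP builds
	 * both the member and this call are compiled away.
	 */
	spinlock_initialize(&e->lock, "example_lock");
	e->counter = 0;
}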
/kernel/trunk/generic/include/synch/waitq.h
45,7 → 45,7
*
* Must be acquired before T.lock for each T of type thread_t.
*/
-spinlock_t lock;
+SPINLOCK_DECLARE(lock);
 
int missed_wakeups; /**< Number of waitq_wakeup() calls that didn't find a thread to wake up. */
link_t head; /**< List of sleeping threads for which there was no missed_wakeup. */
/kernel/trunk/generic/include/console/chardev.h
52,7 → 52,7
char *name;
waitq_t wq;
-spinlock_t lock; /**< Protects everything below. */
+SPINLOCK_DECLARE(lock); /**< Protects everything below. */
__u8 buffer[CHARDEV_BUFLEN];
count_t counter;
chardev_operations_t *op; /**< Implementation of chardev operations. */
/kernel/trunk/generic/include/console/kconsole.h
55,7 → 55,7
/** Structure representing one kconsole command. */
struct cmd_info {
link_t link; /**< Command list link. */
-spinlock_t lock; /**< This lock protects everything below. */
+SPINLOCK_DECLARE(lock); /**< This lock protects everything below. */
const char *name; /**< Command name. */
const char *description; /**< Textual description. */
int (* func)(cmd_arg_t *); /**< Function implementing the command. */
/kernel/trunk/generic/include/mm/frame.h
54,7 → 54,7
struct zone {
link_t link; /**< link to previous and next zone */
 
-spinlock_t lock; /**< this lock protects everything below */
+SPINLOCK_DECLARE(lock); /**< this lock protects everything below */
__address base; /**< physical address of the first frame in the frames array */
frame_t *frames; /**< array of frame_t structures in this zone */
count_t free_count; /**< number of free frame_t structures */
/kernel/trunk/generic/include/mm/vm.h
57,7 → 57,7
* In the future, it should not be difficult to support shared areas of vm.
*/
struct vm_area {
-spinlock_t lock;
+SPINLOCK_DECLARE(lock);
link_t link;
vm_type_t type;
int size;
72,7 → 72,7
* set up during system initialization.
*/
struct vm {
-spinlock_t lock;
+SPINLOCK_DECLARE(lock);
link_t vm_area_head;
pte_t *ptl0;
asid_t asid;
/kernel/trunk/generic/src/console/kconsole.c
65,8 → 65,8
* lower address must be locked first.
*/
-spinlock_t cmd_lock; /**< Lock protecting command list. */
+SPINLOCK_INITIALIZE(cmd_lock); /**< Lock protecting command list. */
link_t cmd_head; /**< Command list. */
 
static cmd_info_t *parse_cmdline(char *cmdline, size_t len);
static bool parse_argument(char *cmdline, size_t len, index_t *start, index_t *end);
77,7 → 77,6
{
int i;
 
-spinlock_initialize(&cmd_lock, "kconsole_cmd");
list_initialize(&cmd_head);
 
cmd_init();
/kernel/trunk/generic/src/proc/task.c
36,7 → 36,7
#include <panic.h>
#include <list.h>
 
-spinlock_t tasks_lock;
+SPINLOCK_INITIALIZE(tasks_lock);
link_t tasks_head;
 
 
48,7 → 48,6
void task_init(void)
{
TASK = NULL;
-spinlock_initialize(&tasks_lock, "tasks_lock");
list_initialize(&tasks_head);
}
 
/kernel/trunk/generic/src/proc/thread.c
54,10 → 54,10
 
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */
 
-spinlock_t threads_lock; /**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
+SPINLOCK_INITIALIZE(threads_lock); /**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
link_t threads_head; /**< List of all threads. */
 
-static spinlock_t tidlock;
+SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;
 
 
96,7 → 96,6
{
THREAD = NULL;
nrdy = 0;
-spinlock_initialize(&threads_lock, "threads_lock");
list_initialize(&threads_head);
}
 
/kernel/trunk/generic/src/main/main.c
174,6 → 174,7
* commands.
*/
kconsole_init();
 
/* Exception handler initialization, before architecture
* starts adding its own handlers
*/
/kernel/trunk/generic/src/synch/rwlock.c
192,7 → 192,11
* we register a function to unlock rwl->lock
* after this thread is put asleep.
*/
+#ifdef CONFIG_SMP
thread_register_call_me(release_spinlock, &rwl->lock);
+#else
+thread_register_call_me(release_spinlock, NULL);
+#endif
rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock);
switch (rc) {
200,7 → 204,7
/*
* release_spinlock() wasn't called
*/
thread_register_call_me(NULL, NULL);
spinlock_unlock(&rwl->lock);
case ESYNCH_TIMEOUT:
/*
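
The new #ifdef above follows from the spinlock.h change: on uniprocessor builds SPINLOCK_DECLARE(lock) in struct rwlock expands to nothing, so the member rwl->lock no longer exists and cannot be passed to release_spinlock(). A sketch of what the preprocessor now produces for the two configurations (field comments trimmed):

#ifdef CONFIG_SMP
struct rwlock {
	spinlock_t lock;	/* present: &rwl->lock is a valid argument */
	mutex_t exclusive;
	count_t readers_in;
};
#else
struct rwlock {
	/* SPINLOCK_DECLARE(lock) expanded to nothing; there is no lock member */
	mutex_t exclusive;
	count_t readers_in;
};
#endif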
/kernel/trunk/generic/src/debug/print.c
35,8 → 35,8
 
#include <arch.h>
 
static char digits[] = "0123456789abcdef"; /**< Hexadecimal characters */
-static spinlock_t printflock; /**< printf spinlock */
+SPINLOCK_INITIALIZE(printflock); /**< printf spinlock */
 
#define DEFAULT_DOUBLE_PRECISION 16
#define DEFAULT_DOUBLE_BUFFER_SIZE 128
/kernel/trunk/generic/src/cpu/cpu.c
66,11 → 66,14
cpus[i].id = i;
spinlock_initialize(&cpus[i].lock, "cpu_t.lock");
 
+#ifdef CONFIG_SMP
waitq_initialize(&cpus[i].kcpulb_wq);
+#endif /* __SMP */
for (j = 0; j < RQ_COUNT; j++) {
spinlock_initialize(&cpus[i].rq[j].lock, "rq_t.lock");
list_initialize(&cpus[i].rq[j].rq_head);
}
}
/kernel/trunk/generic/src/mm/tlb.c
37,15 → 37,10
#include <arch.h>
#include <panic.h>
 
-#ifdef CONFIG_SMP
-static spinlock_t tlblock;
-#endif
+SPINLOCK_INITIALIZE(tlblock);
 
void tlb_init(void)
{
-if (config.cpu_active == 1)
-	spinlock_initialize(&tlblock, "tlb_lock");
 
tlb_arch_init();
}
 
/kernel/trunk/generic/src/mm/frame.c
41,8 → 41,8
#include <print.h>
#include <align.h>
 
-spinlock_t zone_head_lock; /**< this lock protects zone_head list */
+SPINLOCK_INITIALIZE(zone_head_lock); /**< this lock protects zone_head list */
link_t zone_head; /**< list of all zones in the system */
 
/** Blacklist containing non-available areas of memory.
*
242,7 → 242,6
*/
void zone_init(void)
{
-spinlock_initialize(&zone_head_lock, "zone_head_lock");
list_initialize(&zone_head);
}
 
/kernel/trunk/generic/src/mm/heap.c
43,11 → 43,10
*/
 
static chunk_t *chunk0;
-static spinlock_t heaplock;
+SPINLOCK_INITIALIZE(heaplock);
 
void early_heap_init(__address heap, size_t size)
{
-spinlock_initialize(&heaplock, "heap_lock");
memsetb(heap, size, 0);
chunk0 = (chunk_t *) heap;
chunk0->used = 0;
/kernel/trunk/generic/src/interrupt/interrupt.c
40,7 → 40,7
iroutine f;
} exc_table[IVT_ITEMS];
 
-static spinlock_t exctbl_lock;
+SPINLOCK_INITIALIZE(exctbl_lock);
 
/** Register exception handler
*
124,8 → 124,6
{
int i;
 
-spinlock_initialize(&exctbl_lock, "exctbl_lock");
 
for (i=0;i < IVT_ITEMS; i++)
exc_register(i, "undef", exc_undef);
 
/kernel/trunk/arch/mips32/src/debugger.c
38,7 → 38,7
#include <func.h>
 
bpinfo_t breakpoints[BKPOINTS_MAX];
-spinlock_t bkpoint_lock;
+SPINLOCK_INITIALIZE(bkpoint_lock);
 
static int cmd_print_breakpoints(cmd_arg_t *argv);
static cmd_info_t pbkpt_info = {
181,7 → 181,6
 
for (i=0; i<BKPOINTS_MAX; i++)
breakpoints[i].address = NULL;
-spinlock_initialize(&bkpoint_lock, "breakpoint_lock");
cmd_initialize(&pbkpt_info);
if (!cmd_register(&pbkpt_info))
/kernel/trunk/arch/mips32/src/mm/asid.c
33,7 → 33,7
#include <debug.h>
#include <typedefs.h>
 
-static spinlock_t asid_usage_lock;
+SPINLOCK_INITIALIZE(asid_usage_lock);
static count_t asid_usage[ASIDS]; /**< Usage tracking array for ASIDs */
 
/** Get ASID
/kernel/trunk/arch/ia32/src/mm/page.c
35,7 → 35,6
#include <func.h>
#include <arch/interrupt.h>
#include <arch/asm.h>
-#include <synch/spinlock.h>
#include <debug.h>
#include <memstr.h>
#include <print.h>
/kernel/trunk/arch/ia32/src/drivers/i8042.c
63,7 → 63,7
#define PRESSED_CAPSLOCK (1<<1)
#define LOCKED_CAPSLOCK (1<<0)
 
-static spinlock_t keylock; /**< keylock protects keyflags and lockflags. */
+SPINLOCK_INITIALIZE(keylock); /**< keylock protects keyflags and lockflags. */
static volatile int keyflags; /**< Tracking of multiple keypresses. */
static volatile int lockflags; /**< Tracking of multiple keys lockings. */
 
243,7 → 243,6
{
exc_register(VECTOR_KBD, "i8042_interrupt", i8042_interrupt);
trap_virtual_enable_irqs(1<<IRQ_KBD);
-spinlock_initialize(&keylock, "i8042_lock");
chardev_initialize("i8042_kbd", &kbrd, &ops);
stdin = &kbrd;
}
/kernel/trunk/arch/ia32/src/drivers/ega.c
42,7 → 42,7
* Simple and short. Function for displaying characters and "scrolling".
*/
 
-static spinlock_t egalock;
+SPINLOCK_INITIALIZE(egalock);
static __u32 ega_cursor;
 
static void ega_putchar(chardev_t *d, const char ch);