/kernel/trunk/generic/src/console/kconsole.c |
---|
65,8 → 65,8 |
* lower address must be locked first. |
*/ |
-spinlock_t cmd_lock;	/**< Lock protecting command list. */
-link_t cmd_head;	/**< Command list. */
+SPINLOCK_INITIALIZE(cmd_lock);	/**< Lock protecting command list. */
+link_t cmd_head;	/**< Command list. */
static cmd_info_t *parse_cmdline(char *cmdline, size_t len); |
static bool parse_argument(char *cmdline, size_t len, index_t *start, index_t *end); |
77,7 → 77,6 |
{ |
int i; |
-	spinlock_initialize(&cmd_lock, "kconsole_cmd");
list_initialize(&cmd_head); |
cmd_init(); |
/kernel/trunk/generic/src/proc/task.c |
---|
36,7 → 36,7 |
#include <panic.h> |
#include <list.h> |
-spinlock_t tasks_lock;
+SPINLOCK_INITIALIZE(tasks_lock);
link_t tasks_head; |
48,7 → 48,6 |
void task_init(void) |
{ |
TASK = NULL; |
-	spinlock_initialize(&tasks_lock, "tasks_lock");
list_initialize(&tasks_head); |
} |
/kernel/trunk/generic/src/proc/thread.c |
---|
54,10 → 54,10 |
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */ |
-spinlock_t threads_lock;	/**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
-link_t threads_head;	/**< List of all threads. */
+SPINLOCK_INITIALIZE(threads_lock);	/**< Lock protecting threads_head list. For locking rules, see declaration thereof. */
+link_t threads_head;	/**< List of all threads. */
-static spinlock_t tidlock;
+SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0; |
96,7 → 96,6 |
{ |
THREAD = NULL; |
nrdy = 0; |
-	spinlock_initialize(&threads_lock, "threads_lock");
list_initialize(&threads_head); |
} |
/kernel/trunk/generic/src/main/main.c |
---|
174,6 → 174,7 |
* commands. |
*/ |
kconsole_init(); |
/* Exception handler initialization, before architecture |
* starts adding it's own handlers |
*/ |
/kernel/trunk/generic/src/synch/rwlock.c |
---|
192,7 → 192,11 |
* we register a function to unlock rwl->lock |
* after this thread is put asleep. |
*/ |
+#ifdef CONFIG_SMP
 	thread_register_call_me(release_spinlock, &rwl->lock);
+#else
+	thread_register_call_me(release_spinlock, NULL);
+#endif
rc = _mutex_lock_timeout(&rwl->exclusive, usec, trylock); |
switch (rc) { |
200,7 → 204,7 |
/* |
* release_spinlock() wasn't called |
*/ |
thread_register_call_me(NULL, NULL); |
thread_register_call_me(NULL, NULL); |
spinlock_unlock(&rwl->lock); |
case ESYNCH_TIMEOUT: |
/* |
/kernel/trunk/generic/src/debug/print.c |
---|
35,8 → 35,8 |
#include <arch.h> |
-static char digits[] = "0123456789abcdef";	/**< Hexadecimal characters */
-static spinlock_t printflock;	/**< printf spinlock */
+static char digits[] = "0123456789abcdef";	/**< Hexadecimal characters */
+SPINLOCK_INITIALIZE(printflock);	/**< printf spinlock */
#define DEFAULT_DOUBLE_PRECISION 16 |
#define DEFAULT_DOUBLE_BUFFER_SIZE 128 |
/kernel/trunk/generic/src/cpu/cpu.c |
---|
66,11 → 66,14 |
cpus[i].id = i; |
spinlock_initialize(&cpus[i].lock, "cpu_t.lock"); |
#ifdef CONFIG_SMP |
waitq_initialize(&cpus[i].kcpulb_wq); |
#endif /* __SMP */ |
for (j = 0; j < RQ_COUNT; j++) { |
spinlock_initialize(&cpus[i].rq[j].lock, "rq_t.lock"); |
list_initialize(&cpus[i].rq[j].rq_head); |
} |
} |
/kernel/trunk/generic/src/mm/tlb.c |
---|
37,15 → 37,10 |
#include <arch.h> |
#include <panic.h> |
-#ifdef CONFIG_SMP
-static spinlock_t tlblock;
-#endif
+SPINLOCK_INITIALIZE(tlblock);
 void tlb_init(void)
 {
-	if (config.cpu_active == 1)
-		spinlock_initialize(&tlblock, "tlb_lock");
 	tlb_arch_init();
} |
/kernel/trunk/generic/src/mm/frame.c |
---|
41,8 → 41,8 |
#include <print.h> |
#include <align.h> |
-spinlock_t zone_head_lock;	/**< this lock protects zone_head list */
-link_t zone_head;	/**< list of all zones in the system */
+SPINLOCK_INITIALIZE(zone_head_lock);	/**< this lock protects zone_head list */
+link_t zone_head;	/**< list of all zones in the system */
/** Blacklist containing non-available areas of memory. |
* |
242,7 → 242,6 |
*/ |
void zone_init(void) |
{ |
-	spinlock_initialize(&zone_head_lock, "zone_head_lock");
list_initialize(&zone_head); |
} |
/kernel/trunk/generic/src/mm/heap.c |
---|
43,11 → 43,10 |
*/ |
static chunk_t *chunk0; |
-static spinlock_t heaplock;
+SPINLOCK_INITIALIZE(heaplock);
 void early_heap_init(__address heap, size_t size)
 {
-	spinlock_initialize(&heaplock, "heap_lock");
memsetb(heap, size, 0); |
chunk0 = (chunk_t *) heap; |
chunk0->used = 0; |
/kernel/trunk/generic/src/interrupt/interrupt.c |
---|
40,7 → 40,7 |
iroutine f; |
} exc_table[IVT_ITEMS]; |
-static spinlock_t exctbl_lock;
+SPINLOCK_INITIALIZE(exctbl_lock);
/** Register exception handler |
* |
124,8 → 124,6 |
{ |
int i; |
-	spinlock_initialize(&exctbl_lock, "exctbl_lock");
for (i=0;i < IVT_ITEMS; i++) |
exc_register(i, "undef", exc_undef); |