/kernel/trunk/generic/include/cpu.h |
---|
53,7 → 53,7 |
link_t timeout_active_head; |
#ifdef CONFIG_SMP |
atomic_t kcpulbstarted; |
int kcpulbstarted; |
waitq_t kcpulb_wq; |
#endif /* CONFIG_SMP */ |
/kernel/trunk/generic/include/synch/spinlock.h |
---|
32,7 → 32,6 |
#include <arch/types.h> |
#include <typedefs.h> |
#include <preemption.h> |
#include <arch/atomic.h> |
#ifdef CONFIG_SMP |
struct spinlock { |
39,7 → 38,7 |
#ifdef CONFIG_DEBUG_SPINLOCK |
char *name; |
#endif |
atomic_t val; |
int val; |
}; |
/* |
56,12 → 55,12 |
#define SPINLOCK_INITIALIZE(slname) \ |
spinlock_t slname = { \ |
.name = #slname, \ |
.val = { 0 } \ |
.val = 0 \ |
} |
#else |
#define SPINLOCK_INITIALIZE(slname) \ |
spinlock_t slname = { \ |
.val = { 0 } \ |
.val = 0 \ |
} |
#endif |
/kernel/trunk/generic/include/debug.h |
---|
33,7 → 33,7 |
#include <arch/debug.h> |
#include <arch.h> |
#define CALLER ((__address)__builtin_return_address(0)) |
#define CALLER ((__address *)__builtin_return_address(0)) |
#ifndef HERE |
/** Current Instruction Pointer address */ |
/kernel/trunk/generic/src/console/kconsole.c |
---|
91,6 → 91,7 |
*/ |
int cmd_register(cmd_info_t *cmd) |
{ |
ipl_t ipl; |
link_t *cur; |
spinlock_lock(&cmd_lock); |
159,6 → 160,8 |
{ |
int namelen = strlen(name); |
const char *curname; |
char *foundsym = NULL; |
int foundpos = 0; |
spinlock_lock(&cmd_lock); |
466,6 → 469,7 |
index_t start = 0, end = 0; |
cmd_info_t *cmd = NULL; |
link_t *cur; |
ipl_t ipl; |
int i; |
if (!parse_argument(cmdline, len, &start, &end)) { |
/kernel/trunk/generic/src/console/cmd.c |
---|
310,6 → 310,7 |
int cmd_help(cmd_arg_t *argv) |
{ |
link_t *cur; |
ipl_t ipl; |
spinlock_lock(&cmd_lock); |
338,6 → 339,7 |
int cmd_desc(cmd_arg_t *argv) |
{ |
link_t *cur; |
ipl_t ipl; |
spinlock_lock(&cmd_lock); |
366,6 → 368,9 |
/** Search symbol table */ |
int cmd_symaddr(cmd_arg_t *argv) |
{ |
__address symaddr; |
char *symbol; |
symtab_print_search(argv->buffer); |
return 1; |
505,6 → 510,7 |
/** Write 4 byte value to address */ |
int cmd_set4(cmd_arg_t *argv) |
{ |
char *symbol; |
__u32 *addr ; |
__u32 arg1 = argv[1].intval; |
bool pointer = false; |
/kernel/trunk/generic/src/proc/thread.c |
---|
95,7 → 95,7 |
void thread_init(void) |
{ |
THREAD = NULL; |
atomic_set(&nrdy,0); |
nrdy = 0; |
} |
111,7 → 111,7 |
cpu_t *cpu; |
runq_t *r; |
ipl_t ipl; |
int i, avg; |
int i, avg, send_ipi = 0; |
ipl = interrupts_disable(); |
135,7 → 135,7 |
spinlock_unlock(&r->lock); |
atomic_inc(&nrdy); |
avg = atomic_get(&nrdy) / config.cpu_active; |
avg = nrdy / config.cpu_active; |
spinlock_lock(&cpu->lock); |
if ((++cpu->nrdy) > avg) { |
/kernel/trunk/generic/src/proc/scheduler.c |
---|
490,7 → 490,7 |
*/ |
ipl = interrupts_disable(); |
spinlock_lock(&CPU->lock); |
count = atomic_get(&nrdy) / config.cpu_active; |
count = nrdy / config.cpu_active; |
count -= CPU->nrdy; |
spinlock_unlock(&CPU->lock); |
interrupts_restore(ipl); |
618,7 → 618,7 |
/* |
* Tell find_best_thread() to wake us up later again. |
*/ |
atomic_set(&CPU->kcpulbstarted,0); |
CPU->kcpulbstarted = 0; |
goto loop; |
} |
/kernel/trunk/generic/src/mm/vm.c |
---|
188,6 → 188,7 |
void vm_install(vm_t *m) |
{ |
link_t *l; |
ipl_t ipl; |
ipl = interrupts_disable(); |
/kernel/trunk/generic/src/main/main.c |
---|
125,9 → 125,7 |
* pop sequence otherwise. |
*/ |
static void main_bsp_separated_stack(void); |
#ifdef CONFIG_SMP |
static void main_ap_separated_stack(void); |
#endif |
/** Bootstrap CPU main kernel routine |
* |
/kernel/trunk/generic/src/main/kinit.c |
---|
68,13 → 68,11 |
*/ |
void kinit(void *arg) |
{ |
thread_t *t; |
int i; |
#ifdef CONFIG_USERSPACE |
vm_t *m; |
vm_area_t *a; |
task_t *u; |
#endif |
thread_t *t; |
int i; |
interrupts_disable(); |
/kernel/trunk/generic/src/synch/rwlock.c |
---|
225,7 → 225,7 |
case ESYNCH_OK_ATOMIC: |
panic("_mutex_lock_timeout()==ESYNCH_OK_ATOMIC\n"); |
break; |
default: |
default: |
panic("invalid ESYNCH\n"); |
break; |
} |
/kernel/trunk/generic/src/synch/spinlock.c |
---|
45,7 → 45,7 |
*/ |
void spinlock_initialize(spinlock_t *sl, char *name) |
{ |
atomic_set(&sl->val, 0); |
sl->val = 0; |
#ifdef CONFIG_DEBUG_SPINLOCK |
sl->name = name; |
#endif |
151,7 → 151,7 |
*/ |
void spinlock_unlock(spinlock_t *sl) |
{ |
ASSERT(atomic_get(&sl->val) != 0); |
ASSERT(sl->val != 0); |
/* |
* Prevent critical section code from bleeding out this way down. |
158,7 → 158,7 |
*/ |
CS_LEAVE_BARRIER(); |
atomic_set(&sl->val,0); |
sl->val = 0; |
preemption_enable(); |
} |
/kernel/trunk/generic/src/debug/symtab.c |
---|
66,6 → 66,8 |
int namelen = strlen(name); |
char *curname; |
int i,j; |
char *foundsym = NULL; |
int foundpos = 0; |
int colonoffset = -1; |
for (i=0;name[i];i++) |
/kernel/trunk/test/synch/rwlock4/test.c |
---|
61,6 → 61,7 |
__u32 random(__u32 max) |
{ |
__u32 rc; |
ipl_t ipl; |
spinlock_lock(&lock); |
rc = seed % max; |
/kernel/trunk/arch/mips32/src/drivers/arc.c |
---|
188,8 → 188,6 |
arc_putchar('R'); |
arc_putchar('C'); |
arc_putchar('\n'); |
return 0; |
} |
static bool kbd_polling_enabled; |
288,6 → 286,7 |
int total = 0; |
__address base; |
size_t basesize; |
unsigned int i,j; |
desc = arc_entry->getmemorydescriptor(NULL); |
while (desc) { |
/kernel/trunk/arch/mips32/src/drivers/serial.c |
---|
38,6 → 38,7 |
static void serial_write(chardev_t *d, const char ch) |
{ |
int i; |
serial_t *sd = (serial_t *)d->data; |
if (ch == '\n') |
/kernel/trunk/arch/mips32/include/atomic.h |
---|
41,7 → 41,7 |
#define atomic_dec_post(x) atomic_add(x, -1) |
typedef struct { volatile __u32 count; } atomic_t; |
typedef volatile __u32 atomic_t; |
/* Atomic addition of immediate value. |
* |
62,7 → 62,7 |
" sc %0, %1\n" |
" beq %0, %4, 1b\n" /* if the atomic operation failed, try again */ |
/* nop */ /* nop is inserted automatically by compiler */ |
: "=r" (tmp), "=m" (val->count), "=r" (v) |
: "=r" (tmp), "=m" (*val), "=r" (v) |
: "i" (i), "i" (0) |
); |
69,16 → 69,5 |
return v; |
} |
/* Reads/writes are atomic on mips for 4-bytes */ |
static inline void atomic_set(atomic_t *val, __u32 i) |
{ |
val->count = i; |
} |
static inline __u32 atomic_get(atomic_t *val) |
{ |
return val->count; |
} |
#endif |
/kernel/trunk/arch/ia32/src/mm/frame.c |
---|
43,6 → 43,7 |
void frame_arch_init(void) |
{ |
zone_t *z; |
__u8 i; |
if (config.cpu_active == 1) { |
/kernel/trunk/arch/ia32/src/smp/smp.c |
---|
87,6 → 87,7 |
*/ |
void kmp(void *arg) |
{ |
__address src, dst; |
int i; |
ASSERT(ops != NULL); |
/kernel/trunk/arch/ia32/src/fmath.c |
---|
62,8 → 62,7 |
fmath_ld_union_t fmath_ld_union_num; |
fmath_ld_union_t fmath_ld_union_int; |
signed short exp; |
__u64 mask; |
// __u64 mantisa; |
__u64 mask,mantisa; |
int i; |
exp=fmath_get_binary_exponent(num); |
/kernel/trunk/arch/ia32/include/atomic.h |
---|
31,31 → 31,21 |
#include <arch/types.h> |
typedef struct { volatile __u32 count; } atomic_t; |
typedef volatile __u32 atomic_t; |
static inline void atomic_set(atomic_t *val, __u32 i) |
{ |
val->count = i; |
} |
static inline __u32 atomic_get(atomic_t *val) |
{ |
return val->count; |
} |
static inline void atomic_inc(atomic_t *val) { |
#ifdef CONFIG_SMP |
__asm__ volatile ("lock incl %0\n" : "=m" (val->count)); |
__asm__ volatile ("lock incl %0\n" : "=m" (*val)); |
#else |
__asm__ volatile ("incl %0\n" : "=m" (val->count)); |
__asm__ volatile ("incl %0\n" : "=m" (*val)); |
#endif /* CONFIG_SMP */ |
} |
static inline void atomic_dec(atomic_t *val) { |
#ifdef CONFIG_SMP |
__asm__ volatile ("lock decl %0\n" : "=m" (val->count)); |
__asm__ volatile ("lock decl %0\n" : "=m" (*val)); |
#else |
__asm__ volatile ("decl %0\n" : "=m" (val->count)); |
__asm__ volatile ("decl %0\n" : "=m" (*val)); |
#endif /* CONFIG_SMP */ |
} |
65,7 → 55,7 |
__asm__ volatile ( |
"movl $1, %0\n" |
"lock xaddl %0, %1\n" |
: "=r"(r), "=m" (val->count) |
: "=r"(r), "=m" (*val) |
); |
return r; |
} |
86,13 → 76,13 |
#define atomic_inc_post(val) (atomic_inc_pre(val)+1) |
#define atomic_dec_post(val) (atomic_dec_pre(val)-1) |
static inline int test_and_set(atomic_t *val) { |
static inline int test_and_set(volatile int *val) { |
int v; |
__asm__ volatile ( |
"movl $1, %0\n" |
"xchgl %0, %1\n" |
: "=r" (v),"=m" (val->count) |
: "=r" (v),"=m" (*val) |
); |
return v; |
/kernel/trunk/arch/sparc64/include/atomic.h |
---|
31,7 → 31,7 |
#include <arch/types.h> |
typedef struct { volatile __u64 count; } atomic_t; |
typedef volatile __u64 atomic_t; |
/* |
* TODO: these are just placeholders for real implementations of atomic_inc and atomic_dec. |
39,11 → 39,11 |
*/ |
static inline void atomic_inc(atomic_t *val) { |
val->count++; |
(*val)++; |
} |
static inline void atomic_dec(atomic_t *val) { |
val->count--; |
(*val)--; |
} |
#endif |
/kernel/trunk/arch/amd64/include/asm.h |
---|
54,24 → 54,33 |
static inline void cpu_halt(void) { __asm__ volatile ("hlt\n"); }; |
/** Byte from port |
* |
* Get byte from port |
* |
* @param port Port to read from |
* @return Value read |
*/ |
static inline __u8 inb(__u16 port) { __u8 val; __asm__ volatile ("inb %w1, %b0 \n" : "=a" (val) : "d" (port) ); return val; } |
static inline __u8 inb(__u16 port) |
{ |
__u8 out; |
/** Byte to port |
* |
* Output byte to port |
* |
* @param port Port to write to |
* @param val Value to write |
*/ |
static inline void outb(__u16 port, __u8 val) { __asm__ volatile ("outb %b0, %w1\n" : : "a" (val), "d" (port) ); } |
__asm__ volatile ( |
"mov %1, %%dx\n" |
"inb %%dx,%%al\n" |
"mov %%al, %0\n" |
:"=m"(out) |
:"m"(port) |
:"%rdx","%rax" |
); |
return out; |
} |
static inline void outb(__u16 port,__u8 b) |
{ |
__asm__ volatile ( |
"mov %0,%%dx\n" |
"mov %1,%%al\n" |
"outb %%al,%%dx\n" |
: |
:"m"( port), "m" (b) |
:"%rdx","%rax" |
); |
} |
/** Enable interrupts. |
* |
* Enable interrupts and return previous |
/kernel/trunk/arch/amd64/src/cpu/cpu.c |
---|
125,6 → 125,7 |
void cpu_identify(void) |
{ |
cpu_info_t info; |
int i; |
CPU->arch.vendor = VendorUnknown; |
if (has_cpuid()) { |
/kernel/trunk/arch/ia64/include/atomic.h |
---|
31,7 → 31,7 |
#include <arch/types.h> |
typedef struct { volatile __u64 count; } atomic_t; |
typedef volatile __u64 atomic_t; |
static inline atomic_t atomic_add(atomic_t *val, int imm) |
{ |
38,21 → 38,11 |
atomic_t v; |
__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (val->count) : "i" (imm)); |
__asm__ volatile ("fetchadd8.rel %0 = %1, %2\n" : "=r" (v), "+m" (*val) : "i" (imm)); |
return v; |
} |
static inline void atomic_set(atomic_t *val, __u64 i) |
{ |
val->count = i; |
} |
static inline __u32 atomic_get(atomic_t *val) |
{ |
return val->count; |
} |
static inline void atomic_inc(atomic_t *val) { atomic_add(val, 1); } |
static inline void atomic_dec(atomic_t *val) { atomic_add(val, -1); } |
/kernel/trunk/arch/ppc32/include/atomic.h |
---|
31,7 → 31,7 |
#include <arch/types.h> |
typedef struct { volatile __u32 count; } atomic_t; |
typedef volatile __u32 atomic_t; |
/* |
* TODO: these are just placeholders for real implementations of atomic_inc and atomic_dec. |
39,21 → 39,11 |
*/ |
static inline void atomic_inc(atomic_t *val) { |
val->count++; |
(*val)++; |
} |
static inline void atomic_dec(atomic_t *val) { |
val->count--; |
(*val)--; |
} |
static inline void atomic_set(atomic_t *val, __u32 i) |
{ |
val->count = i; |
} |
static inline __u32 atomic_get(atomic_t *val) |
{ |
return val->count; |
} |
#endif |