Subversion Repositories HelenOS-historic

Compare Revisions

Rev 457 → Rev 458

/SPARTAN/trunk/genarch/src/acpi/matd.c
42,7 → 42,7
 
struct acpi_madt *acpi_madt = NULL;
 
#ifdef __SMP__
#ifdef CONFIG_SMP
 
static void madt_l_apic_entry(struct madt_l_apic *la, __u32 index);
static void madt_io_apic_entry(struct madt_io_apic *ioa, __u32 index);
210,4 → 210,4
}
 
 
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/SPARTAN/trunk/generic/include/synch/spinlock.h
33,7 → 33,7
#include <typedefs.h>
#include <preemption.h>
 
#ifdef __SMP__
#ifdef CONFIG_SMP
struct spinlock {
int val;
};
/SPARTAN/trunk/generic/include/smp/smp.h
29,10 → 29,10
#ifndef __SMP_H__
#define __SMP_H__
 
#ifdef __SMP__
#ifdef CONFIG_SMP
extern void smp_init(void);
#else
#define smp_init() ;
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
 
#endif /* __SMP_H__ */
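
The smp.h hunk above also shows the pattern behind most of this rename: under CONFIG_SMP the symbol is a real extern function, while uniprocessor builds get an empty macro, so callers compile unchanged either way. A minimal caller sketch (hypothetical helper name, not part of this revision):

/* Hypothetical caller; builds the same on SMP and uniprocessor kernels.
 * Under CONFIG_SMP smp_init() is the extern function declared above,
 * otherwise the macro expands to an empty statement. */
#include <smp/smp.h>

static void bring_up_secondary_cpus_sketch(void)
{
	smp_init();
}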
/SPARTAN/trunk/generic/include/smp/ipi.h
29,11 → 29,11
#ifndef __IPI_H__
#define __IPI_H__
 
#ifdef __SMP__
#ifdef CONFIG_SMP
extern void ipi_broadcast(int ipi);
extern void ipi_broadcast_arch(int ipi);
#else
#define ipi_broadcast(x) ;
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
 
#endif
/SPARTAN/trunk/generic/include/mm/tlb.h
33,7 → 33,7
 
extern void tlb_init(void);
 
#ifdef __SMP__
#ifdef CONFIG_SMP
extern void tlb_shootdown_start(void);
extern void tlb_shootdown_finalize(void);
extern void tlb_shootdown_ipi_recv(void);
41,7 → 41,7
# define tlb_shootdown_start() ;
# define tlb_shootdown_finalize() ;
# define tlb_shootdown_ipi_recv() ;
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
 
/* Export TLB interface that each architecture must implement. */
extern void tlb_init_arch(void);
/SPARTAN/trunk/generic/include/cpu.h
52,10 → 52,10
spinlock_t timeoutlock;
link_t timeout_active_head;
 
#ifdef __SMP__
#ifdef CONFIG_SMP
int kcpulbstarted;
waitq_t kcpulb_wq;
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
 
int id;
int active;
/SPARTAN/trunk/generic/src/smp/ipi.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifdef __SMP__
#ifdef CONFIG_SMP
 
#include <smp/ipi.h>
#include <config.h>
44,7 → 44,7
/*
* Provisions must be made to avoid sending IPI:
* - before all CPU's were configured to accept the IPI
* - if there is only one CPU but the kernel was compiled with __SMP__
* - if there is only one CPU but the kernel was compiled with CONFIG_SMP
*/
 
if ((config.cpu_active > 1) && (config.cpu_active == config.cpu_count))
51,4 → 51,4
ipi_broadcast_arch(ipi);
}
 
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
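
The guard in ipi_broadcast() above implements the two provisions listed in its comment. An annotated restatement (same code as the hunk, comments added for clarity):

/* Annotated restatement of the guard shown above:
 *   config.cpu_active > 1                  -- do not send an IPI when only one
 *                                              CPU is running, even though the
 *                                              kernel was built with CONFIG_SMP
 *   config.cpu_active == config.cpu_count  -- do not send an IPI while some
 *                                              CPUs are still being configured
 *                                              to accept it */
if ((config.cpu_active > 1) && (config.cpu_active == config.cpu_count))
	ipi_broadcast_arch(ipi);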
/SPARTAN/trunk/generic/src/proc/scheduler.c
61,7 → 61,7
void before_thread_runs(void)
{
before_thread_runs_arch();
#ifdef FPU_LAZY
#ifdef CONFIG_FPU_LAZY
if(THREAD==CPU->fpu_owner)
fpu_enable();
else
77,7 → 77,7
#endif
}
 
#ifdef FPU_LAZY
#ifdef CONFIG_FPU_LAZY
void scheduler_fpu_lazy_request(void)
{
fpu_enable();
134,7 → 134,7
interrupts_enable();
if (n == 0) {
#ifdef __SMP__
#ifdef CONFIG_SMP
/*
* If the load balancing thread is not running, wake it up and
* set CPU-private flag that the kcpulb has been started.
143,7 → 143,7
waitq_wakeup(&CPU->kcpulb_wq, 0);
goto loop;
}
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/*
* For there was nothing to run, the CPU goes to sleep
412,7 → 412,7
 
if (THREAD) {
spinlock_lock(&THREAD->lock);
#ifndef FPU_LAZY
#ifndef CONFIG_FPU_LAZY
fpu_context_save(&(THREAD->saved_fpu_context));
#endif
if (!context_save(&THREAD->saved_context)) {
462,7 → 462,7
 
 
 
#ifdef __SMP__
#ifdef CONFIG_SMP
/** Load balancing thread
*
* SMP load balancing thread, supervising thread supplies
623,4 → 623,4
goto loop;
}
 
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/SPARTAN/trunk/generic/src/main/kinit.c
43,9 → 43,9
#include <print.h>
#include <memstr.h>
 
#ifdef __SMP__
#ifdef CONFIG_SMP
#include <arch/smp/mps.h>
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
 
#include <synch/waitq.h>
#include <synch/spinlock.h>
66,7 → 66,7
 
interrupts_disable();
 
#ifdef __SMP__
#ifdef CONFIG_SMP
if (config.cpu_count > 1) {
/*
* Create the kmp thread and wait for its completion.
84,7 → 84,7
}
else panic("thread_create/kmp");
}
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/*
* Now that all CPUs are up, we can report what we've found.
*/
95,7 → 95,7
printf("cpu%d: not active\n", i);
}
 
#ifdef __SMP__
#ifdef CONFIG_SMP
if (config.cpu_count > 1) {
/*
* For each CPU, create its load balancing thread.
113,7 → 113,7
 
}
}
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
 
interrupts_enable();
 
/SPARTAN/trunk/generic/src/main/main.c
39,10 → 39,10
#include <cpu.h>
#include <align.h>
 
#ifdef __SMP__
#ifdef CONFIG_SMP
#include <arch/smp/apic.h>
#include <arch/smp/mps.h>
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
 
#include <smp/smp.h>
 
202,7 → 202,7
}
 
 
#ifdef __SMP__
#ifdef CONFIG_SMP
/** Application CPUs main kernel routine
*
* Executed by application processors, temporary stack
268,4 → 268,4
scheduler();
/* not reached */
}
#endif /* __SMP__*/
#endif /* CONFIG_SMP */
/SPARTAN/trunk/generic/src/synch/spinlock.c
34,7 → 34,7
#include <print.h>
#include <debug.h>
 
#ifdef __SMP__
#ifdef CONFIG_SMP
 
/** Initialize spinlock
*
47,7 → 47,7
sl->val = 0;
}
 
#ifdef DEBUG_SPINLOCK
#ifdef CONFIG_DEBUG_SPINLOCK
/** Lock spinlock
*
* Lock spinlock.
/SPARTAN/trunk/generic/src/cpu/cpu.c
49,9 → 49,9
void cpu_init(void) {
int i, j;
#ifdef __SMP__
#ifdef CONFIG_SMP
if (config.cpu_active == 1) {
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
cpus = (cpu_t *) malloc(sizeof(cpu_t) * config.cpu_count);
if (!cpus)
panic("malloc/cpus");
66,7 → 66,7
cpus[i].id = i;
#ifdef __SMP__
#ifdef CONFIG_SMP
waitq_initialize(&cpus[i].kcpulb_wq);
#endif /* __SMP */
75,9 → 75,9
}
}
#ifdef __SMP__
#ifdef CONFIG_SMP
}
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
 
CPU = &cpus[config.cpu_active-1];
/SPARTAN/trunk/generic/src/mm/tlb.c
36,7 → 36,7
#include <config.h>
#include <arch.h>
 
#ifdef __SMP__
#ifdef CONFIG_SMP
static spinlock_t tlblock;
#endif
 
48,7 → 48,7
tlb_init_arch();
}
 
#ifdef __SMP__
#ifdef CONFIG_SMP
/* must be called with interrupts disabled */
void tlb_shootdown_start(void)
{
84,4 → 84,4
tlb_invalidate(0); /* TODO: use valid ASID */
CPU->tlb_active = 1;
}
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/SPARTAN/trunk/arch/amd64/Makefile.inc
60,13 → 60,13
#
 
ifeq ($(CONFIG_SMP),y)
DEFS += -DSMP
DEFS += -DCONFIG_SMP
endif
ifeq ($(CONFIG_HT),y)
DEFS += -DHT
DEFS += -DCONFIG_HT
endif
ifeq ($(CONFIG_FPU_LAZY),y)
DEFS += -DFPU_LAZY
DEFS += -DCONFIG_FPU_LAZY
endif
 
ARCH_SOURCES = \
/SPARTAN/trunk/arch/amd64/src/amd64.c
75,10 → 75,10
 
trap_register(VECTOR_SYSCALL, syscall);
#ifdef __SMP__
#ifdef CONFIG_SMP
trap_register(VECTOR_TLB_SHOOTDOWN_IPI, tlb_shootdown_ipi);
trap_register(VECTOR_WAKEUP_IPI, wakeup_ipi);
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
}
}
 
94,9 → 94,9
if (config.cpu_active == 1) {
memory_print_map();
#ifdef __SMP__
#ifdef CONFIG_SMP
acpi_init();
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
}
}
 
/SPARTAN/trunk/arch/amd64/src/smp/ap.S
39,7 → 39,7
.section K_TEXT_START_2, "ax"
 
#ifdef __SMP__
#ifdef CONFIG_SMP
 
.global ap_boot
 
100,4 → 100,4
call main_ap # never returns
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/SPARTAN/trunk/arch/amd64/src/interrupt.c
139,7 → 139,7
 
void nm_fault(__u8 n, __native stack[])
{
#ifdef FPU_LAZY
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
panic("fpu fault");
/SPARTAN/trunk/arch/mips32/Makefile.inc
102,7 → 102,7
#
 
ifeq ($(CONFIG_FPU_LAZY),y)
DEFS += -DFPU_LAZY
DEFS += -DCONFIG_FPU_LAZY
endif
 
ARCH_SOURCES = \
/SPARTAN/trunk/arch/mips32/src/exception.c
73,7 → 73,7
tlb_invalid(pstate);
break;
case EXC_CpU:
#ifdef FPU_LAZY
#ifdef CONFIG_FPU_LAZY
if (cp0_cause_coperr(cause) == fpu_cop_id)
scheduler_fpu_lazy_request();
else
/SPARTAN/trunk/arch/ia32/include/atomic.h
32,19 → 32,19
#include <arch/types.h>
 
static inline void atomic_inc(volatile int *val) {
#ifdef __SMP__
#ifdef CONFIG_SMP
__asm__ volatile ("lock incl %0\n" : "=m" (*val));
#else
__asm__ volatile ("incl %0\n" : "=m" (*val));
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
}
 
static inline void atomic_dec(volatile int *val) {
#ifdef __SMP__
#ifdef CONFIG_SMP
__asm__ volatile ("lock decl %0\n" : "=m" (*val));
#else
__asm__ volatile ("decl %0\n" : "=m" (*val));
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
}
 
static inline int test_and_set(volatile int *val) {
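
The atomic.h hunk above now keys the lock prefix on CONFIG_SMP as well. A hypothetical usage sketch (counter, function name, and include path are assumptions) showing where the distinction matters:

/* Hypothetical usage sketch. On CONFIG_SMP builds atomic_inc() emits
 * "lock incl", so concurrent increments from several CPUs cannot lose
 * updates; on a uniprocessor build the plain "incl" is sufficient, since a
 * single instruction cannot be interrupted mid-way. */
#include <arch/atomic.h>	/* assumed include path for the header above */

static volatile int ipi_count = 0;

void count_ipi_sketch(void)
{
	atomic_inc(&ipi_count);
}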
/SPARTAN/trunk/arch/ia32/Makefile.inc
79,13 → 79,13
#
 
ifeq ($(CONFIG_SMP),y)
DEFS += -DSMP
DEFS += -DCONFIG_SMP
endif
ifeq ($(CONFIG_HT),y)
DEFS += -DHT
DEFS += -DCONFIG_HT
endif
ifeq ($(CONFIG_FPU_LAZY),y)
DEFS += -DFPU_LAZY
DEFS += -DCONFIG_FPU_LAZY
endif
 
ARCH_SOURCES = \
/SPARTAN/trunk/arch/ia32/src/ia32.c
62,10 → 62,10
trap_register(VECTOR_SYSCALL, syscall);
#ifdef __SMP__
#ifdef CONFIG_SMP
trap_register(VECTOR_TLB_SHOOTDOWN_IPI, tlb_shootdown_ipi);
trap_register(VECTOR_WAKEUP_IPI, wakeup_ipi);
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
}
}
 
81,9 → 81,9
if (config.cpu_active == 1) {
memory_print_map();
#ifdef __SMP__
#ifdef CONFIG_SMP
acpi_init();
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
}
}
 
/SPARTAN/trunk/arch/ia32/src/smp/mps.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifdef __SMP__
#ifdef CONFIG_SMP
 
#include <config.h>
#include <print.h>
422,4 → 422,4
return -1;
}
 
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/SPARTAN/trunk/arch/ia32/src/smp/smp.c
47,7 → 47,7
#include <memstr.h>
#include <arch/i8259.h>
 
#ifdef __SMP__
#ifdef CONFIG_SMP
 
static struct smp_config_operations *ops = NULL;
 
165,4 → 165,4
waitq_wakeup(&kmp_completion_wq, WAKEUP_FIRST);
}
 
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/SPARTAN/trunk/arch/ia32/src/smp/ap.S
32,7 → 32,7
 
.section K_TEXT_START_2, "ax"
 
#ifdef __SMP__
#ifdef CONFIG_SMP
 
.global ap_boot
 
73,4 → 73,4
jmpl $KTEXT, $main_ap
 
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/SPARTAN/trunk/arch/ia32/src/smp/ipi.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
#ifdef __SMP__
#ifdef CONFIG_SMP
 
#include <smp/ipi.h>
#include <arch/smp/apic.h>
36,4 → 36,4
(void) l_apic_broadcast_custom_ipi((__u8) ipi);
}
 
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/SPARTAN/trunk/arch/ia32/src/smp/apic.c
37,7 → 37,7
#include <arch/asm.h>
#include <arch.h>
 
#ifdef __SMP__
#ifdef CONFIG_SMP
 
/*
* This is functional, far-from-general-enough interface to the APIC.
416,4 → 416,4
 
}
 
#endif /* __SMP__ */
#endif /* CONFIG_SMP */
/SPARTAN/trunk/arch/ia32/src/interrupt.c
110,7 → 110,7
 
void nm_fault(__u8 n, __native stack[])
{
#ifdef FPU_LAZY
#ifdef CONFIG_FPU_LAZY
scheduler_fpu_lazy_request();
#else
panic("fpu fault");
/SPARTAN/trunk/arch/ia32/src/atomic.S
28,7 → 28,7
 
.text
 
#ifdef __SMP__
#ifdef CONFIG_SMP
 
.global spinlock_arch
 
42,7 → 42,7
movl 12(%esp),%ebx
 
0:
#ifdef __HT__
#ifdef CONFIG_HT
pause # Pentium 4's with HT love this instruction
#endif
movl (%ebx),%eax
/SPARTAN/trunk/Makefile
62,7 → 62,7
DEFS += -DNDEBUG
endif
ifeq ($(CONFIG_DEBUG_SPINLOCK),y)
DEFS += -DDEBUG_SPINLOCK
DEFS += -DCONFIG_DEBUG_SPINLOCK
endif
 
## Toolchain configuration