Subversion Repositories HelenOS

Compare Revisions

Rev 2088 → Rev 2089

/trunk/kernel/generic/include/func.h
36,7 → 36,6
#define KERN_FUNC_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <atomic.h>
 
extern atomic_t haltstate;
/trunk/kernel/generic/include/fpu_context.h
36,7 → 36,6
#define KERN_FPU_CONTEXT_H_
 
#include <arch/fpu_context.h>
#include <typedefs.h>
 
#if defined(CONFIG_FPU_LAZY) && !defined(ARCH_HAS_FPU)
# error "CONFIG_FPU_LAZY defined, but no ARCH_HAS_FPU"
/trunk/kernel/generic/include/config.h
36,7 → 36,6
#define KERN_CONFIG_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <arch/mm/page.h>
 
#define STACK_SIZE PAGE_SIZE
/trunk/kernel/generic/include/time/timeout.h
36,27 → 36,10
#define KERN_TIMEOUT_H_
 
#include <arch/types.h>
#include <cpu.h>
#include <synch/spinlock.h>
#include <adt/list.h>
#include <proc/task.h>
 
#define us2ticks(us) ((uint64_t)(((uint32_t) (us)/(1000000/HZ))))
#define us2ticks(us) ((uint64_t) (((uint32_t) (us) / (1000000 / HZ))))
 
typedef void (* timeout_handler_t)(void *arg);
 
typedef struct {
SPINLOCK_DECLARE(lock);
 
link_t link; /**< Link to the list of active timeouts on THE->cpu */
uint64_t ticks; /**< Timeout will be activated after this number of clock() ticks. */
 
timeout_handler_t handler; /**< Function that will be called on timeout activation. */
void *arg; /**< Argument to be passed to handler() function. */
cpu_t *cpu; /**< Processor on which this timeout is registered. */
} timeout_t;
 
extern void timeout_init(void);
extern void timeout_initialize(timeout_t *t);
extern void timeout_reinitialize(timeout_t *t);
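
A minimal sketch of arming the timeout_t declared above. Only timeout_init()/timeout_initialize()/timeout_reinitialize() appear in this diff; the timeout_register() helper, the handler name, and the delay are assumptions for illustration.

#include <time/timeout.h>

static void my_handler(void *arg)
{
	/* Invoked from clock() context when the timeout fires. */
	(void) arg;
}

static timeout_t my_timeout;

static void arm_example(void)
{
	timeout_initialize(&my_timeout);
	/* timeout_register() is assumed here; it is not part of this diff. */
	timeout_register(&my_timeout, (uint64_t) 1000000, my_handler, NULL);	/* ~1 s, in microseconds */
}
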
/trunk/kernel/generic/include/proc/scheduler.h
37,7 → 37,6
 
#include <synch/spinlock.h>
#include <time/clock.h> /* HZ */
#include <typedefs.h>
#include <atomic.h>
#include <adt/list.h>
 
/trunk/kernel/generic/include/proc/task.h
1,4 → 1,4
/*
/*
* Copyright (c) 2001-2004 Jakub Jermar
* All rights reserved.
*
35,18 → 35,65
#ifndef KERN_TASK_H_
#define KERN_TASK_H_
 
#include <typedefs.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <synch/rwlock.h>
#include <synch/futex.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <ipc/ipc.h>
#include <security/cap.h>
#include <arch/proc/task.h>
#include <arch/proc/thread.h>
#include <arch/context.h>
#include <arch/fpu_context.h>
#include <arch/cpu.h>
#include <mm/tlb.h>
#include <proc/scheduler.h>
 
#define IPC_MAX_PHONES 16
#define THREAD_NAME_BUFLEN 20
 
struct answerbox;
struct task;
struct thread;
 
typedef enum {
IPC_PHONE_FREE = 0, /**< Phone is free and can be allocated */
IPC_PHONE_CONNECTING, /**< Phone is connecting somewhere */
IPC_PHONE_CONNECTED, /**< Phone is connected */
IPC_PHONE_HUNGUP, /**< Phone is hung up, waiting for answers to come */
IPC_PHONE_SLAMMED /**< Phone was hung up by the server */
} ipc_phone_state_t;
 
/** Structure identifying phone (in TASK structure) */
typedef struct {
SPINLOCK_DECLARE(lock);
link_t link;
struct answerbox *callee;
ipc_phone_state_t state;
atomic_t active_calls;
} phone_t;
 
typedef struct answerbox {
SPINLOCK_DECLARE(lock);
 
struct task *task;
 
waitq_t wq;
 
link_t connected_phones; /**< Phones connected to this answerbox */
link_t calls; /**< Received calls */
link_t dispatched_calls; /* Should be a hash table in the future */
 
link_t answers; /**< Answered calls */
 
SPINLOCK_DECLARE(irq_lock);
link_t irq_notifs; /**< Notifications from IRQ handlers */
link_t irq_head; /**< IRQs with notifications to this answerbox. */
} answerbox_t;
 
/** Task structure. */
struct task {
typedef struct task {
/** Task lock.
*
* Must be acquired before threads_lock and thread lock of any of its threads.
54,7 → 101,7
SPINLOCK_DECLARE(lock);
char *name;
thread_t *main_thread; /**< Pointer to the main thread. */
struct thread *main_thread; /**< Pointer to the main thread. */
link_t th_head; /**< List of threads contained in this task. */
as_t *as; /**< Address space. */
task_id_t taskid; /**< Unique identity of task */
84,8 → 131,165
btree_t futexes; /**< B+tree of futexes referenced by this task. */
uint64_t cycles; /**< Accumulated accounting. */
};
} task_t;
 
/** CPU structure.
*
* There is one structure like this for every processor.
*/
typedef struct {
SPINLOCK_DECLARE(lock);
 
tlb_shootdown_msg_t tlb_messages[TLB_MESSAGE_QUEUE_LEN];
count_t tlb_messages_count;
context_t saved_context;
 
atomic_t nrdy;
runq_t rq[RQ_COUNT];
volatile count_t needs_relink;
 
SPINLOCK_DECLARE(timeoutlock);
link_t timeout_active_head;
 
count_t missed_clock_ticks; /**< When the system clock loses a tick, it is recorded here
so that clock() can react. This variable is
CPU-local and can only be accessed when interrupts
are disabled. */
 
/**
* Processor ID assigned by kernel.
*/
int id;
int active;
int tlb_active;
 
uint16_t frequency_mhz;
uint32_t delay_loop_const;
 
cpu_arch_t arch;
 
struct thread *fpu_owner;
/**
* Stack used by scheduler when there is no running thread.
*/
uint8_t *stack;
} cpu_t;
 
typedef void (* timeout_handler_t)(void *arg);
 
typedef struct {
SPINLOCK_DECLARE(lock);
 
link_t link; /**< Link to the list of active timeouts on THE->cpu */
uint64_t ticks; /**< Timeout will be activated after this number of clock() ticks. */
 
timeout_handler_t handler; /**< Function that will be called on timeout activation. */
void *arg; /**< Argument to be passed to handler() function. */
cpu_t *cpu; /**< Processor on which this timeout is registered. */
} timeout_t;
 
/** Thread states. */
typedef enum {
Invalid, /**< It is an error if a thread is found in this state. */
Running, /**< State of a thread that is currently executing on some CPU. */
Sleeping, /**< Thread in this state is waiting for an event. */
Ready, /**< State of threads in a run queue. */
Entering, /**< Threads are in this state before they are first readied. */
Exiting, /**< After a thread calls thread_exit(), it is put into Exiting state. */
Undead /**< Threads that were not detached but exited are in the Undead state. */
} state_t;
 
/** Join types. */
typedef enum {
None,
TaskClnp, /**< The thread will be joined by ktaskclnp thread. */
TaskGC /**< The thread will be joined by ktaskgc thread. */
} thread_join_type_t;
 
/** Thread structure. There is one per thread. */
typedef struct thread {
link_t rq_link; /**< Run queue link. */
link_t wq_link; /**< Wait queue link. */
link_t th_link; /**< Links to threads within containing task. */
/** Lock protecting thread structure.
*
* Protects the whole thread structure except list links above.
*/
SPINLOCK_DECLARE(lock);
 
char name[THREAD_NAME_BUFLEN];
 
void (* thread_code)(void *); /**< Function implementing the thread. */
void *thread_arg; /**< Argument passed to thread_code() function. */
 
/** From here, the stored context is restored when the thread is scheduled. */
context_t saved_context;
/** From here, the stored timeout context is restored when sleep times out. */
context_t sleep_timeout_context;
/** From here, the stored interruption context is restored when sleep is interrupted. */
context_t sleep_interruption_context;
 
bool sleep_interruptible; /**< If true, the thread can be interrupted from sleep. */
waitq_t *sleep_queue; /**< Wait queue in which this thread sleeps. */
timeout_t sleep_timeout; /**< Timeout used for timeoutable sleeping. */
volatile int timeout_pending; /**< Flag signalling sleep timeout in progress. */
 
/** True if this thread is executing copy_from_uspace(). False otherwise. */
bool in_copy_from_uspace;
/** True if this thread is executing copy_to_uspace(). False otherwise. */
bool in_copy_to_uspace;
/**
* If true, the thread will not go to sleep at all and will
* call thread_exit() before returning to userspace.
*/
bool interrupted;
thread_join_type_t join_type; /**< Who joins the thread. */
bool detached; /**< If true, thread_join_timeout() cannot be used on this thread. */
waitq_t join_wq; /**< Waitq for thread_join_timeout(). */
 
fpu_context_t *saved_fpu_context;
int fpu_context_exists;
 
/*
* Defined only if the thread doesn't run.
* It means that the FPU context is in the CPU that last executed this thread.
* This disables migration.
*/
int fpu_context_engaged;
 
rwlock_type_t rwlock_holder_type;
 
void (* call_me)(void *); /**< Function to be called by the scheduler before the thread is put to sleep. */
void *call_me_with; /**< Argument passed to call_me(). */
 
state_t state; /**< Thread's state. */
int flags; /**< Thread's flags. */
cpu_t *cpu; /**< Thread's CPU. */
task_t *task; /**< Containing task. */
 
uint64_t ticks; /**< Ticks before preemption. */
uint64_t cycles; /**< Thread accounting. */
uint64_t last_cycle; /**< Last sampled cycle. */
bool uncounted; /**< Thread doesn't affect accumulated accounting. */
 
int priority; /**< Thread's priority. Implemented as an index into CPU->rq. */
uint32_t tid; /**< Thread ID. */
thread_arch_t arch; /**< Architecture-specific data. */
 
uint8_t *kstack; /**< Thread's kernel stack. */
} thread_t;
 
extern spinlock_t tasks_lock;
extern btree_t tasks_btree;
 
97,7 → 301,10
extern int task_kill(task_id_t id);
extern uint64_t task_get_accounting(task_t *t);
 
extern void cap_set(task_t *t, cap_t caps);
extern cap_t cap_get(task_t *t);
 
 
#ifndef task_create_arch
extern void task_create_arch(task_t *t);
#endif
/trunk/kernel/generic/include/proc/thread.h
35,129 → 35,25
#ifndef KERN_THREAD_H_
#define KERN_THREAD_H_
 
#include <arch/proc/thread.h>
#include <synch/spinlock.h>
#include <arch/context.h>
#include <fpu_context.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <synch/waitq.h>
#include <proc/task.h>
#include <cpu.h>
#include <synch/rwlock.h>
#include <synch/synch.h>
#include <config.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <mm/slab.h>
#include <arch/cpu.h>
#include <mm/tlb.h>
#include <proc/uarg.h>
 
#define THREAD_STACK_SIZE STACK_SIZE
 
/** Thread states. */
typedef enum {
Invalid, /**< It is an error if a thread is found in this state. */
Running, /**< State of a thread that is currently executing on some CPU. */
Sleeping, /**< Thread in this state is waiting for an event. */
Ready, /**< State of threads in a run queue. */
Entering, /**< Threads are in this state before they are first readied. */
Exiting, /**< After a thread calls thread_exit(), it is put into Exiting state. */
Undead /**< Threads that were not detached but exited are in the Undead state. */
} state_t;
 
extern char *thread_states[];
 
/** Join types. */
typedef enum {
None,
TaskClnp, /**< The thread will be joined by ktaskclnp thread. */
TaskGC /**< The thread will be joined by ktaskgc thread. */
} thread_join_type_t;
 
/* Thread flags */
#define THREAD_FLAG_WIRED (1<<0) /**< Thread cannot be migrated to another CPU. */
#define THREAD_FLAG_STOLEN (1<<1) /**< Thread was migrated to another CPU and has not run yet. */
#define THREAD_FLAG_USPACE (1<<2) /**< Thread executes in userspace. */
#define THREAD_FLAG_WIRED (1 << 0) /**< Thread cannot be migrated to another CPU. */
#define THREAD_FLAG_STOLEN (1 << 1) /**< Thread was migrated to another CPU and has not run yet. */
#define THREAD_FLAG_USPACE (1 << 2) /**< Thread executes in userspace. */
 
#define THREAD_NAME_BUFLEN 20
 
/** Thread structure. There is one per thread. */
struct thread {
link_t rq_link; /**< Run queue link. */
link_t wq_link; /**< Wait queue link. */
link_t th_link; /**< Links to threads within containing task. */
/** Lock protecting thread structure.
*
* Protects the whole thread structure except list links above.
*/
SPINLOCK_DECLARE(lock);
 
char name[THREAD_NAME_BUFLEN];
 
void (* thread_code)(void *); /**< Function implementing the thread. */
void *thread_arg; /**< Argument passed to thread_code() function. */
 
/** From here, the stored context is restored when the thread is scheduled. */
context_t saved_context;
/** From here, the stored timeout context is restored when sleep times out. */
context_t sleep_timeout_context;
/** From here, the stored interruption context is restored when sleep is interrupted. */
context_t sleep_interruption_context;
 
bool sleep_interruptible; /**< If true, the thread can be interrupted from sleep. */
waitq_t *sleep_queue; /**< Wait queue in which this thread sleeps. */
timeout_t sleep_timeout; /**< Timeout used for timeoutable sleeping. */
volatile int timeout_pending; /**< Flag signalling sleep timeout in progress. */
 
/** True if this thread is executing copy_from_uspace(). False otherwise. */
bool in_copy_from_uspace;
/** True if this thread is executing copy_to_uspace(). False otherwise. */
bool in_copy_to_uspace;
/**
* If true, the thread will not go to sleep at all and will
* call thread_exit() before returning to userspace.
*/
bool interrupted;
thread_join_type_t join_type; /**< Who joins the thread. */
bool detached; /**< If true, thread_join_timeout() cannot be used on this thread. */
waitq_t join_wq; /**< Waitq for thread_join_timeout(). */
 
fpu_context_t *saved_fpu_context;
int fpu_context_exists;
 
/*
* Defined only if the thread doesn't run.
* It means that the FPU context is in the CPU that last executed this thread.
* This disables migration.
*/
int fpu_context_engaged;
 
rwlock_type_t rwlock_holder_type;
 
void (* call_me)(void *); /**< Function to be called by the scheduler before the thread is put to sleep. */
void *call_me_with; /**< Argument passed to call_me(). */
 
state_t state; /**< Thread's state. */
int flags; /**< Thread's flags. */
cpu_t *cpu; /**< Thread's CPU. */
task_t *task; /**< Containing task. */
 
uint64_t ticks; /**< Ticks before preemption. */
uint64_t cycles; /**< Thread accounting. */
uint64_t last_cycle; /**< Last sampled cycle. */
bool uncounted; /**< Thread doesn't affect accumulated accounting. */
 
int priority; /**< Thread's priority. Implemented as an index into CPU->rq. */
uint32_t tid; /**< Thread ID. */
thread_arch_t arch; /**< Architecture-specific data. */
 
uint8_t *kstack; /**< Thread's kernel stack. */
};
 
/** Thread list lock.
*
* This lock protects all link_t structures chained in threads_head.
195,6 → 91,7
extern void thread_destroy(thread_t *t);
extern void thread_update_accounting(void);
extern bool thread_exists(thread_t *t);
extern void thread_interrupt_sleep(thread_t *t);
 
/* Fpu context slab cache */
extern slab_cache_t *fpu_context_slab;
/trunk/kernel/generic/include/lib/rd.h
36,7 → 36,6
#define KERN_RD_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
/**
* RAM disk version
/trunk/kernel/generic/include/lib/elf.h
37,7 → 37,6
 
#include <arch/elf.h>
#include <arch/types.h>
#include <typedefs.h>
 
/**
* current ELF version
335,7 → 334,6
typedef struct elf64_symbol elf_symbol_t;
#endif
 
extern int elf_load(elf_header_t *header, as_t * as);
extern char *elf_error(int rc);
 
#endif
/trunk/kernel/generic/include/debug.h
42,7 → 42,7
 
#ifndef HERE
/** Current Instruction Pointer address */
# define HERE ((uintptr_t *)0)
# define HERE ((uintptr_t *) 0)
#endif
 
/** Debugging ASSERT macro
/trunk/kernel/generic/include/cpu.h
35,64 → 35,10
#ifndef KERN_CPU_H_
#define KERN_CPU_H_
 
#include <arch/cpu.h>
#include <proc/scheduler.h>
#include <synch/spinlock.h>
#include <synch/waitq.h>
#include <arch/types.h>
#include <typedefs.h>
#include <arch/context.h>
#include <config.h>
#include <adt/list.h>
#include <mm/tlb.h>
#include <proc/thread.h>
 
#define CPU_STACK_SIZE STACK_SIZE
 
/** CPU structure.
*
* There is one structure like this for every processor.
*/
typedef struct {
SPINLOCK_DECLARE(lock);
 
tlb_shootdown_msg_t tlb_messages[TLB_MESSAGE_QUEUE_LEN];
count_t tlb_messages_count;
context_t saved_context;
 
atomic_t nrdy;
runq_t rq[RQ_COUNT];
volatile count_t needs_relink;
 
SPINLOCK_DECLARE(timeoutlock);
link_t timeout_active_head;
 
count_t missed_clock_ticks; /**< When the system clock loses a tick, it is recorded here
so that clock() can react. This variable is
CPU-local and can only be accessed when interrupts
are disabled. */
 
/**
* Processor ID assigned by kernel.
*/
int id;
int active;
int tlb_active;
 
uint16_t frequency_mhz;
uint32_t delay_loop_const;
 
cpu_arch_t arch;
 
thread_t *fpu_owner;
/**
* Stack used by scheduler when there is no running thread.
*/
uint8_t *stack;
} cpu_t;
 
extern cpu_t *cpus;
 
extern void cpu_init(void);
/trunk/kernel/generic/include/interrupt.h
36,14 → 36,15
#define KERN_INTERRUPT_H_
 
#include <arch/interrupt.h>
#include <typedefs.h>
#include <arch/types.h>
#include <proc/task.h>
#include <proc/thread.h>
#include <arch.h>
#include <console/klog.h>
#include <ipc/irq.h>
#include <ddi/irq.h>
 
typedef void (* iroutine)(int n, istate_t *istate);
 
#define fault_if_from_uspace(istate, cmd, ...) \
{ \
if (istate_from_uspace(istate)) { \
54,7 → 55,6
} \
}
 
 
extern iroutine exc_register(int n, const char *name, iroutine f);
extern void exc_dispatch(int n, istate_t *t);
void exc_init(void);
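
A brief sketch of registering an exception handler through the iroutine type and exc_register() declared above; the vector number 14 and the handler/name strings are hypothetical.

#include <interrupt.h>

static void my_fault_handler(int n, istate_t *istate)
{
	/* Kernel-mode fault handling would go here. */
	(void) n;
	(void) istate;
}

static void register_example(void)
{
	/* exc_register() returns the previously installed routine. */
	iroutine old = exc_register(14, "my_fault", my_fault_handler);
	(void) old;
}
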
/trunk/kernel/generic/include/main/main.h
35,8 → 35,6
#ifndef KERN_MAIN_H_
#define KERN_MAIN_H_
 
#include <typedefs.h>
 
extern uintptr_t stack_safe;
 
#endif
/trunk/kernel/generic/include/synch/futex.h
36,7 → 36,6
#define KERN_FUTEX_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <synch/waitq.h>
#include <genarch/mm/page_ht.h>
#include <genarch/mm/page_pt.h>
/trunk/kernel/generic/include/synch/rwlock.h
36,7 → 36,6
#define KERN_RWLOCK_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <synch/mutex.h>
#include <synch/synch.h>
#include <synch/spinlock.h>
/trunk/kernel/generic/include/synch/mutex.h
36,7 → 36,6
#define KERN_MUTEX_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <synch/semaphore.h>
#include <synch/synch.h>
 
/trunk/kernel/generic/include/synch/spinlock.h
36,7 → 36,6
#define KERN_SPINLOCK_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <preemption.h>
#include <atomic.h>
#include <debug.h>
97,7 → 96,7
*/
CS_LEAVE_BARRIER();
atomic_set(&sl->val,0);
atomic_set(&sl->val, 0);
preemption_enable();
}
 
109,7 → 108,7
#define SPINLOCK_DECLARE(name)
#define SPINLOCK_INITIALIZE(name)
 
#define spinlock_initialize(x,name)
#define spinlock_initialize(x, name)
#define spinlock_lock(x) preemption_disable()
#define spinlock_trylock(x) (preemption_disable(), 1)
#define spinlock_unlock(x) preemption_enable()
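
For context, typical use of the macros above; the lock and counter names are illustrative. On non-SMP builds the declaration and lock/unlock collapse to the preemption control shown in the fallback definitions.

#include <synch/spinlock.h>

SPINLOCK_INITIALIZE(example_lock);	/* statically allocated, initialized lock */
static int shared_counter;

static void increment_example(void)
{
	spinlock_lock(&example_lock);	/* also disables preemption */
	shared_counter++;
	spinlock_unlock(&example_lock);	/* re-enables preemption */
}
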
/trunk/kernel/generic/include/synch/semaphore.h
36,7 → 36,6
#define KERN_SEMAPHORE_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <synch/waitq.h>
#include <synch/synch.h>
 
45,11 → 44,11
} semaphore_t;
 
#define semaphore_down(s) \
_semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
_semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
#define semaphore_trydown(s) \
_semaphore_down_timeout((s),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
#define semaphore_down_timeout(s,usec) \
_semaphore_down_timeout((s),(usec),SYNCH_FLAGS_NONE)
_semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING)
#define semaphore_down_timeout(s, usec) \
_semaphore_down_timeout((s), (usec), SYNCH_FLAGS_NONE)
 
extern void semaphore_initialize(semaphore_t *s, int val);
extern int _semaphore_down_timeout(semaphore_t *s, uint32_t usec, int flags);
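
A short usage sketch for the reformatted macros above; semaphore_up() is assumed to exist as the counterpart of the declared down variants and is not shown in this diff.

#include <synch/semaphore.h>

static semaphore_t sem;

static void semaphore_example(void)
{
	semaphore_initialize(&sem, 1);			/* binary semaphore */

	(void) semaphore_down(&sem);			/* blocks without timeout */
	/* ... critical work ... */
	semaphore_up(&sem);				/* assumed counterpart */

	(void) semaphore_trydown(&sem);			/* non-blocking attempt */
	(void) semaphore_down_timeout(&sem, 500000);	/* wait at most 500 ms */
}
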
/trunk/kernel/generic/include/synch/waitq.h
36,7 → 36,6
#define KERN_WAITQ_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <synch/synch.h>
#include <adt/list.h>
58,7 → 57,7
} waitq_t;
 
#define waitq_sleep(wq) \
waitq_sleep_timeout((wq),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
waitq_sleep_timeout((wq), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
 
extern void waitq_initialize(waitq_t *wq);
extern int waitq_sleep_timeout(waitq_t *wq, uint32_t usec, int flags);
67,7 → 66,6
extern void waitq_sleep_finish(waitq_t *wq, int rc, ipl_t ipl);
extern void waitq_wakeup(waitq_t *wq, bool all);
extern void _waitq_wakeup_unsafe(waitq_t *wq, bool all);
extern void waitq_interrupt_sleep(thread_t *t);
 
#endif
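
A sketch of the basic wait queue protocol using the declarations above; the queue and function names are illustrative.

#include <synch/waitq.h>

static waitq_t wq;

static void init_example(void)
{
	waitq_initialize(&wq);
}

static void waiter(void)
{
	/* Blocks until another thread calls waitq_wakeup(). */
	(void) waitq_sleep(&wq);
}

static void waker(void)
{
	waitq_wakeup(&wq, false);	/* wake a single sleeping thread */
}
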
 
/trunk/kernel/generic/include/bitops.h
35,9 → 35,7
#ifndef KERN_BITOPS_H_
#define KERN_BITOPS_H_
 
#include <typedefs.h>
 
 
/** Return position of first non-zero bit from left (i.e. [log_2(arg)]).
*
* If the number is zero, it returns 0
/trunk/kernel/generic/include/memstr.h
35,7 → 35,6
#ifndef KERN_MEMSTR_H_
#define KERN_MEMSTR_H_
 
#include <typedefs.h>
#include <arch/types.h>
#include <arch/memstr.h>
 
/trunk/kernel/generic/include/ddi/irq.h
35,11 → 35,40
#ifndef KERN_IRQ_H_
#define KERN_IRQ_H_
 
typedef enum {
CMD_MEM_READ_1 = 0,
CMD_MEM_READ_2,
CMD_MEM_READ_4,
CMD_MEM_READ_8,
CMD_MEM_WRITE_1,
CMD_MEM_WRITE_2,
CMD_MEM_WRITE_4,
CMD_MEM_WRITE_8,
CMD_PORT_READ_1,
CMD_PORT_WRITE_1,
CMD_IA64_GETCHAR,
CMD_PPC32_GETCHAR,
CMD_LAST
} irq_cmd_type;
 
typedef struct {
irq_cmd_type cmd;
void *addr;
unsigned long long value;
int dstarg;
} irq_cmd_t;
 
typedef struct {
unsigned int cmdcount;
irq_cmd_t *cmds;
} irq_code_t;
 
#ifdef KERNEL
 
#include <arch/types.h>
#include <typedefs.h>
#include <adt/list.h>
#include <ipc/irq.h>
#include <synch/spinlock.h>
#include <proc/task.h>
 
typedef enum {
IRQ_DECLINE, /**< Decline to service. */
51,8 → 80,27
IRQ_TRIGGER_EDGE
} irq_trigger_t;
 
typedef void (* irq_handler_t)(irq_t *irq, void *arg, ...);
struct irq;
typedef void (* irq_handler_t)(struct irq *irq, void *arg, ...);
 
 
 
/** IPC notification config structure.
*
* Primarily, this structure is encapsulated in the irq_t structure.
* It is protected by irq_t::lock.
*/
typedef struct {
bool notify; /**< When false, notifications are not sent. */
answerbox_t *answerbox; /**< Answerbox for notifications. */
unative_t method; /**< Method to be used for the notification. */
irq_code_t *code; /**< Top-half pseudocode. */
count_t counter; /**< Counter. */
link_t link; /**< Link between IRQs that are notifying the
same answerbox. The list is protected by
the answerbox irq_lock. */
} ipc_notif_cfg_t;
 
/** Structure representing one device IRQ.
*
* If one device has multiple interrupts, there will
59,7 → 107,7
* be multiple irq_t instantiations with the same
* devno.
*/
struct irq {
typedef struct irq {
/** Hash table link. */
link_t link;
 
86,7 → 134,7
 
/** Notification configuration structure. */
ipc_notif_cfg_t notif_cfg;
};
} irq_t;
 
extern void irq_init(count_t inrs, count_t chains);
extern void irq_initialize(irq_t *irq);
96,5 → 144,7
 
#endif
 
#endif
 
/** @}
*/
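
To illustrate the irq_cmd_t/irq_code_t types moved into this header, a hypothetical one-command top-half program; the I/O port address is made up.

#include <ddi/irq.h>

static irq_cmd_t uart_cmds[] = {
	{
		.cmd = CMD_PORT_READ_1,
		.addr = (void *) 0x3f8,		/* hypothetical I/O port */
		.value = 0,
		.dstarg = 1			/* store the byte into notification argument 1 */
	}
};

static irq_code_t uart_code = {
	.cmdcount = 1,
	.cmds = uart_cmds
};
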
/trunk/kernel/generic/include/ddi/device.h
35,8 → 35,6
#ifndef KERN_DEVICE_H_
#define KERN_DEVICE_H_
 
#include <typedefs.h>
 
extern devno_t device_assign_devno(void);
 
#endif
/trunk/kernel/generic/include/ddi/ddi.h
37,7 → 37,7
 
#include <ddi/ddi_arg.h>
#include <arch/types.h>
#include <typedefs.h>
#include <proc/task.h>
 
/** Structure representing contiguous physical memory area. */
typedef struct {
/trunk/kernel/generic/include/printf/printf_core.h
35,7 → 35,7
#ifndef KERN_PRINTF_CORE_H_
#define KERN_PRINTF_CORE_H_
 
#include <typedefs.h>
#include <arch/types.h>
#include <arch/arg.h>
 
/** Structure for specifying output methods for different printf clones. */
/trunk/kernel/generic/include/console/chardev.h
35,7 → 35,6
#ifndef KERN_CHARDEV_H_
#define KERN_CHARDEV_H_
 
#include <typedefs.h>
#include <arch/types.h>
#include <synch/waitq.h>
#include <synch/spinlock.h>
42,19 → 41,19
 
#define CHARDEV_BUFLEN 512
 
struct chardev;
 
/* Character device operations interface. */
struct chardev_operations {
void (* suspend)(chardev_t *); /**< Suspend pushing characters. */
void (* resume)(chardev_t *); /**< Resume pushing characters. */
void (* write)(chardev_t *, char c); /**< Write character to stream. */
typedef struct {
void (* suspend)(struct chardev *); /**< Suspend pushing characters. */
void (* resume)(struct chardev *); /**< Resume pushing characters. */
void (* write)(struct chardev *, char c); /**< Write character to stream. */
/** Read character directly from device, assume interrupts disabled */
char (* read)(chardev_t *);
};
char (* read)(struct chardev *);
} chardev_operations_t;
 
typedef struct chardev_operations chardev_operations_t;
 
/** Character input device. */
struct chardev {
typedef struct chardev {
char *name;
waitq_t wq;
64,7 → 63,7
chardev_operations_t *op; /**< Implementation of chardev operations. */
index_t index;
void *data;
};
} chardev_t;
 
extern void chardev_initialize(char *name,
chardev_t *chardev,
/trunk/kernel/generic/include/console/kconsole.h
35,7 → 35,6
#ifndef KERN_KCONSOLE_H_
#define KERN_KCONSOLE_H_
 
#include <typedefs.h>
#include <adt/list.h>
#include <synch/spinlock.h>
 
42,24 → 41,24
#define MAX_CMDLINE 256
#define KCONSOLE_HISTORY 10
 
enum cmd_arg_type {
typedef enum {
ARG_TYPE_INVALID = 0,
ARG_TYPE_INT,
ARG_TYPE_STRING,
ARG_TYPE_VAR /**< Variable type - either symbol or string */
};
} cmd_arg_type_t;
 
/** Structure representing one argument of kconsole command line. */
struct cmd_arg {
typedef struct {
cmd_arg_type_t type; /**< Type descriptor. */
void *buffer; /**< Buffer where to store data. */
size_t len; /**< Size of the buffer. */
unative_t intval; /**< Integer value */
cmd_arg_type_t vartype; /**< Resulting type of variable arg */
};
} cmd_arg_t;
 
/** Structure representing one kconsole command. */
struct cmd_info {
typedef struct {
link_t link; /**< Command list link. */
SPINLOCK_DECLARE(lock); /**< This lock protects everything below. */
const char *name; /**< Command name. */
68,7 → 67,7
count_t argc; /**< Number of arguments. */
cmd_arg_t *argv; /**< Argument vector. */
void (* help)(void); /**< Function for printing detailed help. */
};
} cmd_info_t;
 
extern spinlock_t cmd_lock;
extern link_t cmd_head;
/trunk/kernel/generic/include/console/console.h
36,7 → 36,7
#define KERN_CONSOLE_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <console/chardev.h>
 
extern chardev_t *stdin;
extern chardev_t *stdout;
/trunk/kernel/generic/include/console/cmd.h
35,7 → 35,7
#ifndef KERN_CMD_H_
#define KERN_CMD_H_
 
#include <typedefs.h>
#include <console/kconsole.h>
 
extern void cmd_initialize(cmd_info_t *cmd);
extern void cmd_init(void);
/trunk/kernel/generic/include/arch.h
35,14 → 35,9
#ifndef KERN_ARCH_H_
#define KERN_ARCH_H_
 
#include <arch/types.h>
#include <arch/arch.h>
#include <typedefs.h>
#include <proc/task.h>
 
#include <cpu.h>
#include <arch/cpu.h>
#include <arch/asm.h>
 
#define DEFAULT_CONTEXT 0
 
#define CPU THE->cpu
/trunk/kernel/generic/include/security/cap.h
49,7 → 49,6
 
#include <syscall/sysarg64.h>
#include <arch/types.h>
#include <typedefs.h>
 
/**
* CAP_CAP allows its holder to grant/revoke arbitrary
81,9 → 80,6
 
typedef uint32_t cap_t;
 
extern void cap_set(task_t *t, cap_t caps);
extern cap_t cap_get(task_t *t);
 
extern unative_t sys_cap_grant(sysarg64_t *uspace_taskid_arg, cap_t caps);
extern unative_t sys_cap_revoke(sysarg64_t *uspace_taskid_arg, cap_t caps);
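
A sketch of how the relocated cap_set()/cap_get() pair (now declared in proc/task.h by this revision) composes with the capability bits here; CAP_CAP is referenced in the comment above and assumed to be one of the defined bits.

#include <security/cap.h>
#include <proc/task.h>

static void grant_cap_example(task_t *t)
{
	cap_t caps = cap_get(t);
	cap_set(t, caps | CAP_CAP);	/* let the task grant/revoke capabilities */
}
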
 
/trunk/kernel/generic/include/adt/hash_table.h
37,18 → 37,9
 
#include <adt/list.h>
#include <arch/types.h>
#include <typedefs.h>
 
/** Hash table structure. */
struct hash_table {
link_t *entry;
count_t entries;
count_t max_keys;
hash_table_operations_t *op;
};
 
/** Set of operations for hash table. */
struct hash_table_operations {
typedef struct {
/** Hash function.
*
* @param key Array of keys needed to compute hash index. All keys must be passed.
70,8 → 61,16
* @param item Item that was removed from the hash table.
*/
void (*remove_callback)(link_t *item);
};
} hash_table_operations_t;
 
/** Hash table structure. */
typedef struct {
link_t *entry;
count_t entries;
count_t max_keys;
hash_table_operations_t *op;
} hash_table_t;
 
#define hash_table_get_instance(item, type, member) list_get_instance((item), type, member)
 
extern void hash_table_create(hash_table_t *h, count_t m, count_t max_keys, hash_table_operations_t *op);
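
A condensed sketch of wiring up the reordered types above; the compare() operation and the concrete hash/remove callbacks are assumed to be supplied elsewhere, since this diff shows only part of hash_table.h.

#include <adt/hash_table.h>

extern hash_table_operations_t my_ops;	/* hash(), compare() and remove_callback() defined elsewhere */
static hash_table_t table;

static void table_example(void)
{
	/* 128 buckets, single-word keys; signature as declared above. */
	hash_table_create(&table, 128, 1, &my_ops);
}
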
/trunk/kernel/generic/include/adt/list.h
36,13 → 36,12
#define KERN_LIST_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
/** Doubly linked list head and link type. */
struct link {
link_t *prev; /**< Pointer to the previous item in the list. */
link_t *next; /**< Pointer to the next item in the list. */
};
typedef struct link {
struct link *prev; /**< Pointer to the previous item in the list. */
struct link *next; /**< Pointer to the next item in the list. */
} link_t;
 
/** Declare and initialize statically allocated list.
*
/trunk/kernel/generic/include/adt/bitmap.h
36,7 → 36,6
#define KERN_BITMAP_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
#define BITS2BYTES(bits) (bits ? ((((bits)-1)>>3)+1) : 0)
 
/trunk/kernel/generic/include/adt/btree.h
36,7 → 36,6
#define KERN_BTREE_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <adt/list.h>
 
#define BTREE_M 5
45,7 → 44,7
typedef uint64_t btree_key_t;
 
/** B-tree node structure. */
struct btree_node {
typedef struct btree_node {
/** Number of keys. */
count_t keys;
 
65,10 → 64,10
* ...
* There is room for storing a subtree pointer for the extra key.
*/
btree_node_t *subtree[BTREE_M + 1];
struct btree_node *subtree[BTREE_M + 1];
 
/** Pointer to parent node. Root node has NULL parent. */
btree_node_t *parent;
struct btree_node *parent;
 
/** Link connecting leaf-level nodes. Defined only when this node is a leaf. */
link_t leaf_link;
76,13 → 75,13
/** Variables needed by btree_print(). */
link_t bfs_link;
int depth;
};
} btree_node_t;
 
/** B-tree structure. */
struct btree {
typedef struct {
btree_node_t *root; /**< B-tree root node pointer. */
link_t leaf_head; /**< Leaf-level list head. */
};
} btree_t;
 
extern void btree_init(void);
 
/trunk/kernel/generic/include/adt/fifo.h
45,7 → 45,6
#ifndef KERN_FIFO_H_
#define KERN_FIFO_H_
 
#include <typedefs.h>
#include <mm/slab.h>
 
/** Create and initialize static FIFO.
/trunk/kernel/generic/include/mm/mm.h
0,0 → 1,67
/*
* Copyright (c) 2007 Martin Decky
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericmm
* @{
*/
/** @file
*/
 
#ifndef KERN_MM_H_
#define KERN_MM_H_
 
#define PAGE_CACHEABLE_SHIFT 0
#define PAGE_NOT_CACHEABLE_SHIFT PAGE_CACHEABLE_SHIFT
#define PAGE_PRESENT_SHIFT 1
#define PAGE_NOT_PRESENT_SHIFT PAGE_PRESENT_SHIFT
#define PAGE_USER_SHIFT 2
#define PAGE_KERNEL_SHIFT PAGE_USER_SHIFT
#define PAGE_READ_SHIFT 3
#define PAGE_WRITE_SHIFT 4
#define PAGE_EXEC_SHIFT 5
#define PAGE_GLOBAL_SHIFT 6
 
#define PAGE_NOT_CACHEABLE (0 << PAGE_CACHEABLE_SHIFT)
#define PAGE_CACHEABLE (1 << PAGE_CACHEABLE_SHIFT)
 
#define PAGE_PRESENT (0 << PAGE_PRESENT_SHIFT)
#define PAGE_NOT_PRESENT (1 << PAGE_PRESENT_SHIFT)
 
#define PAGE_USER (1 << PAGE_USER_SHIFT)
#define PAGE_KERNEL (0 << PAGE_USER_SHIFT)
 
#define PAGE_READ (1 << PAGE_READ_SHIFT)
#define PAGE_WRITE (1 << PAGE_WRITE_SHIFT)
#define PAGE_EXEC (1 << PAGE_EXEC_SHIFT)
 
#define PAGE_GLOBAL (1 << PAGE_GLOBAL_SHIFT)
 
#endif
 
/** @}
*/
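
The new flag definitions combine by bitwise OR; a sketch of passing them to page_mapping_insert(), whose reformatted prototype appears in mm/page.h below. Both addresses are placeholders.

#include <mm/mm.h>
#include <mm/as.h>
#include <mm/page.h>

static void map_example(void)
{
	/* Map one kernel page read/write. */
	page_table_lock(AS_KERNEL, true);
	page_mapping_insert(AS_KERNEL, 0x80000000, 0x00100000,
	    PAGE_PRESENT | PAGE_CACHEABLE | PAGE_KERNEL | PAGE_READ | PAGE_WRITE);
	page_table_unlock(AS_KERNEL, true);
}
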
/trunk/kernel/generic/include/mm/frame.h
37,7 → 37,6
#define KERN_FRAME_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <adt/list.h>
#include <synch/spinlock.h>
#include <mm/buddy.h>
/trunk/kernel/generic/include/mm/page.h
35,59 → 35,22
#ifndef KERN_PAGE_H_
#define KERN_PAGE_H_
 
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <mm/as.h>
#include <memstr.h>
 
#define PAGE_CACHEABLE_SHIFT 0
#define PAGE_NOT_CACHEABLE_SHIFT PAGE_CACHEABLE_SHIFT
#define PAGE_PRESENT_SHIFT 1
#define PAGE_NOT_PRESENT_SHIFT PAGE_PRESENT_SHIFT
#define PAGE_USER_SHIFT 2
#define PAGE_KERNEL_SHIFT PAGE_USER_SHIFT
#define PAGE_READ_SHIFT 3
#define PAGE_WRITE_SHIFT 4
#define PAGE_EXEC_SHIFT 5
#define PAGE_GLOBAL_SHIFT 6
 
#define PAGE_NOT_CACHEABLE (0<<PAGE_CACHEABLE_SHIFT)
#define PAGE_CACHEABLE (1<<PAGE_CACHEABLE_SHIFT)
 
#define PAGE_PRESENT (0<<PAGE_PRESENT_SHIFT)
#define PAGE_NOT_PRESENT (1<<PAGE_PRESENT_SHIFT)
 
#define PAGE_USER (1<<PAGE_USER_SHIFT)
#define PAGE_KERNEL (0<<PAGE_USER_SHIFT)
 
#define PAGE_READ (1<<PAGE_READ_SHIFT)
#define PAGE_WRITE (1<<PAGE_WRITE_SHIFT)
#define PAGE_EXEC (1<<PAGE_EXEC_SHIFT)
 
#define PAGE_GLOBAL (1<<PAGE_GLOBAL_SHIFT)
 
 
/**
* Macro for computing page color.
*/
#define PAGE_COLOR(va) (((va) >> PAGE_WIDTH) & ((1 << PAGE_COLOR_BITS) - 1))
 
/** Page fault access type. */
enum pf_access {
PF_ACCESS_READ,
PF_ACCESS_WRITE,
PF_ACCESS_EXEC
};
typedef enum pf_access pf_access_t;
 
/** Operations to manipulate page mappings. */
struct page_mapping_operations {
void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame, int
flags);
typedef struct {
void (* mapping_insert)(as_t *as, uintptr_t page, uintptr_t frame,
int flags);
void (* mapping_remove)(as_t *as, uintptr_t page);
pte_t *(* mapping_find)(as_t *as, uintptr_t page);
};
typedef struct page_mapping_operations page_mapping_operations_t;
} page_mapping_operations_t;
 
extern page_mapping_operations_t *page_mapping_operations;
 
94,8 → 57,8
extern void page_init(void);
extern void page_table_lock(as_t *as, bool lock);
extern void page_table_unlock(as_t *as, bool unlock);
extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame, int
flags);
extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
int flags);
extern void page_mapping_remove(as_t *as, uintptr_t page);
extern pte_t *page_mapping_find(as_t *as, uintptr_t page);
extern pte_t *page_table_create(int flags);
/trunk/kernel/generic/include/mm/asid.h
44,7 → 44,8
 
#include <arch/mm/asid.h>
#include <synch/spinlock.h>
#include <typedefs.h>
#include <adt/list.h>
#include <mm/as.h>
 
#endif
 
/trunk/kernel/generic/include/mm/tlb.h
37,7 → 37,6
 
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
 
/**
* Number of TLB shootdown messages that can be queued in processor
46,22 → 45,20
#define TLB_MESSAGE_QUEUE_LEN 10
 
/** Type of TLB shootdown message. */
enum tlb_invalidate_type {
typedef enum {
TLB_INVL_INVALID = 0, /**< Invalid type. */
TLB_INVL_ALL, /**< Invalidate all entries in TLB. */
TLB_INVL_ASID, /**< Invalidate all entries belonging to one address space. */
TLB_INVL_PAGES /**< Invalidate specified page range belonging to one address space. */
};
typedef enum tlb_invalidate_type tlb_invalidate_type_t;
} tlb_invalidate_type_t;
 
/** TLB shootdown message. */
struct tlb_shootdown_msg {
typedef struct {
tlb_invalidate_type_t type; /**< Message type. */
asid_t asid; /**< Address space identifier. */
uintptr_t page; /**< Page address. */
count_t count; /**< Number of pages to invalidate. */
};
typedef struct tlb_shootdown_msg tlb_shootdown_msg_t;
} tlb_shootdown_msg_t;
 
extern void tlb_init(void);
 
/trunk/kernel/generic/include/mm/as.h
47,7 → 47,6
#include <arch/mm/as.h>
#include <arch/mm/asid.h>
#include <arch/types.h>
#include <typedefs.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <adt/list.h>
66,55 → 65,21
 
#define FLAG_AS_KERNEL (1 << 0) /**< Kernel address space. */
 
/** Address space structure.
*
* as_t contains the list of as_areas of userspace accessible
* pages for one or more tasks. Ranges of kernel memory pages are not
* supposed to figure in the list as they are shared by all tasks and
* set up during system initialization.
*/
struct as {
/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
/** Address space area attributes. */
#define AS_AREA_ATTR_NONE 0
#define AS_AREA_ATTR_PARTIAL 1 /**< Not fully initialized area. */
 
mutex_t lock;
#define AS_PF_FAULT 0 /**< The page fault was not resolved by as_page_fault(). */
#define AS_PF_OK 1 /**< The page fault was resolved by as_page_fault(). */
#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */
 
/** Number of references (i.e., tasks that reference this as). */
count_t refcount;
 
/** Number of processors on which this address space is active. */
count_t cpu_refcount;
 
/** B+tree of address space areas. */
btree_t as_area_btree;
 
/** Page table pointer. Constant on architectures that use global page hash table. */
pte_t *page_table;
 
/** Address space identifier. Constant on architectures that do not support ASIDs.*/
asid_t asid;
/** Architecture specific content. */
as_arch_t arch;
};
 
struct as_operations {
typedef struct {
pte_t *(* page_table_create)(int flags);
void (* page_table_destroy)(pte_t *page_table);
void (* page_table_lock)(as_t *as, bool lock);
void (* page_table_unlock)(as_t *as, bool unlock);
};
typedef struct as_operations as_operations_t;
} as_operations_t;
 
/** Address space area attributes. */
#define AS_AREA_ATTR_NONE 0
#define AS_AREA_ATTR_PARTIAL 1 /**< Not fully initialized area. */
 
#define AS_PF_FAULT 0 /**< The page fault was not resolved by as_page_fault(). */
#define AS_PF_OK 1 /**< The page fault was resolved by as_page_fault(). */
#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace()
or memcpy_to_uspace(). */
 
/** This structure contains information associated with the shared address space area. */
typedef struct {
mutex_t lock; /**< This lock must be acquired only when the as_area lock is held. */
122,15 → 87,17
btree_t pagemap; /**< B+tree containing complete map of anonymous pages of the shared area. */
} share_info_t;
 
/** Address space area backend structure. */
typedef struct {
int (* page_fault)(as_area_t *area, uintptr_t addr, pf_access_t access);
void (* frame_free)(as_area_t *area, uintptr_t page, uintptr_t frame);
void (* share)(as_area_t *area);
} mem_backend_t;
/** Page fault access type. */
typedef enum {
PF_ACCESS_READ,
PF_ACCESS_WRITE,
PF_ACCESS_EXEC
} pf_access_t;
 
struct mem_backend;
 
/** Backend data stored in address space area. */
typedef union {
typedef union mem_backend_data {
struct { /**< elf_backend members */
elf_header_t *elf;
elf_segment_header_t *segment;
146,7 → 113,7
* Each as_area_t structure describes one contiguous area of virtual memory.
* In the future, it should not be difficult to support shared areas.
*/
struct as_area {
typedef struct {
mutex_t lock;
as_t *as; /**< Containing address space. */
int flags; /**< Flags related to the memory represented by the address space area. */
156,12 → 123,19
btree_t used_space; /**< Map of used space. */
share_info_t *sh_info; /**< If the address space area has been shared, this pointer will
reference the share info structure. */
mem_backend_t *backend; /**< Memory backend backing this address space area. */
struct mem_backend *backend; /**< Memory backend backing this address space area. */
 
/** Data to be used by the backend. */
mem_backend_data_t backend_data;
};
} as_area_t;
 
/** Address space area backend structure. */
typedef struct mem_backend {
int (* page_fault)(as_area_t *area, uintptr_t addr, pf_access_t access);
void (* frame_free)(as_area_t *area, uintptr_t page, uintptr_t frame);
void (* share)(as_area_t *area);
} mem_backend_t;
 
extern as_t *AS_KERNEL;
extern as_operations_t *as_operations;
 
206,11 → 180,13
extern void as_deinstall_arch(as_t *as);
#endif /* !def as_deinstall_arch */
 
/* Backend declarations. */
/* Backend declarations and functions. */
extern mem_backend_t anon_backend;
extern mem_backend_t elf_backend;
extern mem_backend_t phys_backend;
 
extern int elf_load(elf_header_t *header, as_t *as);
 
/* Address space area related syscalls. */
extern unative_t sys_as_area_create(uintptr_t address, size_t size, int flags);
extern unative_t sys_as_area_resize(uintptr_t address, size_t size, int flags);
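
A skeletal backend built on the reordered mem_backend_t above, mirroring the shape of the anon/elf/phys backends declared below it; the callback bodies are stubs.

#include <mm/as.h>

static int my_page_fault(as_area_t *area, uintptr_t addr, pf_access_t access)
{
	/* Resolve the fault by mapping a frame here; the stub just reports failure. */
	(void) area; (void) addr; (void) access;
	return AS_PF_FAULT;
}

static void my_frame_free(as_area_t *area, uintptr_t page, uintptr_t frame)
{
	/* Release the backing frame. */
	(void) area; (void) page; (void) frame;
}

static void my_share(as_area_t *area)
{
	/* Prepare area->sh_info for sharing. */
	(void) area;
}

mem_backend_t my_backend = {
	.page_fault = my_page_fault,
	.frame_free = my_frame_free,
	.share = my_share
};
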
/trunk/kernel/generic/include/mm/buddy.h
36,7 → 36,7
#define KERN_BUDDY_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <adt/list.h>
 
#define BUDDY_SYSTEM_INNER_BLOCK 0xff
 
/trunk/kernel/generic/include/typedefs.h
35,47 → 35,8
#ifndef KERN_TYPEDEFS_H_
#define KERN_TYPEDEFS_H_
 
#define false 0
#define true 1
 
typedef short bool;
 
typedef unsigned long size_t;
typedef unsigned long count_t;
typedef unsigned long index_t;
 
typedef unsigned long long task_id_t;
typedef unsigned long context_id_t;
 
typedef struct task task_t;
typedef struct thread thread_t;
 
typedef struct as_area as_area_t;
typedef struct as as_t;
 
typedef struct link link_t;
 
typedef struct chardev chardev_t;
 
typedef enum cmd_arg_type cmd_arg_type_t;
typedef struct cmd_arg cmd_arg_t;
typedef struct cmd_info cmd_info_t;
 
typedef struct istate istate_t;
typedef void (* function)();
typedef void (* iroutine)(int n, istate_t *istate);
 
typedef struct hash_table hash_table_t;
typedef struct hash_table_operations hash_table_operations_t;
 
typedef struct btree_node btree_node_t;
typedef struct btree btree_t;
 
typedef signed int inr_t;
typedef signed int devno_t;
typedef struct irq irq_t;
typedef struct ipc_notif_cfg ipc_notif_cfg_t;
 
#endif
 
/** @}
/trunk/kernel/generic/include/macros.h
36,7 → 36,6
#define KERN_MACROS_H_
 
#include <arch/types.h>
#include <typedefs.h>
 
#define is_digit(d) (((d) >= '0') && ((d) <= '9'))
#define is_lower(c) (((c) >= 'a') && ((c) <= 'z'))
/trunk/kernel/generic/include/context.h
36,7 → 36,6
#define KERN_CONTEXT_H_
 
#include <arch/types.h>
#include <typedefs.h>
#include <arch/context.h>
 
 
/trunk/kernel/generic/include/syscall/copy.h
35,7 → 35,7
#ifndef KERN_COPY_H_
#define KERN_COPY_H_
 
#include <typedefs.h>
#include <arch/types.h>
 
/** Label within memcpy_from_uspace() that contains return -1. */
extern char memcpy_from_uspace_failover_address;
/trunk/kernel/generic/include/syscall/syscall.h
71,7 → 71,6
#ifdef KERNEL
 
#include <arch/types.h>
#include <typedefs.h>
 
typedef unative_t (*syshandler_t)();
 
/trunk/kernel/generic/include/ipc/sysipc.h
38,7 → 38,6
#include <ipc/ipc.h>
#include <ipc/irq.h>
#include <arch/types.h>
#include <typedefs.h>
 
unative_t sys_ipc_call_sync_fast(unative_t phoneid, unative_t method,
unative_t arg1, ipc_data_t *data);
/trunk/kernel/generic/include/ipc/ipc.h
158,53 → 158,13
 
#ifdef KERNEL
 
#include <synch/waitq.h>
#include <adt/list.h>
#include <proc/task.h>
 
#define IPC_MAX_PHONES 16
 
typedef struct answerbox_s answerbox_t;
typedef struct phone_s phone_t;
typedef struct {
unative_t args[IPC_CALL_LEN];
phone_t *phone;
} ipc_data_t;
 
struct answerbox_s {
SPINLOCK_DECLARE(lock);
 
task_t *task;
 
waitq_t wq;
 
link_t connected_phones; /**< Phones connected to this answerbox */
link_t calls; /**< Received calls */
link_t dispatched_calls; /* Should be a hash table in the future */
 
link_t answers; /**< Answered calls */
 
SPINLOCK_DECLARE(irq_lock);
link_t irq_notifs; /**< Notifications from IRQ handlers */
link_t irq_head; /**< IRQs with notifications to this answerbox. */
};
 
typedef enum {
IPC_PHONE_FREE = 0, /**< Phone is free and can be allocated */
IPC_PHONE_CONNECTING, /**< Phone is connecting somewhere */
IPC_PHONE_CONNECTED, /**< Phone is connected */
IPC_PHONE_HUNGUP, /**< Phone is hung up, waiting for answers to come */
IPC_PHONE_SLAMMED /**< Phone was hung up by the server */
} ipc_phone_state_t;
 
/** Structure identifying phone (in TASK structure) */
struct phone_s {
SPINLOCK_DECLARE(lock);
link_t link;
answerbox_t *callee;
ipc_phone_state_t state;
atomic_t active_calls;
};
 
typedef struct {
link_t link;
 
/trunk/kernel/generic/include/ipc/irq.h
38,57 → 38,11
/** Maximum length of IPC IRQ program */
#define IRQ_MAX_PROG_SIZE 10
 
typedef enum {
CMD_MEM_READ_1 = 0,
CMD_MEM_READ_2,
CMD_MEM_READ_4,
CMD_MEM_READ_8,
CMD_MEM_WRITE_1,
CMD_MEM_WRITE_2,
CMD_MEM_WRITE_4,
CMD_MEM_WRITE_8,
CMD_PORT_READ_1,
CMD_PORT_WRITE_1,
CMD_IA64_GETCHAR,
CMD_PPC32_GETCHAR,
CMD_LAST
} irq_cmd_type;
 
typedef struct {
irq_cmd_type cmd;
void *addr;
unsigned long long value;
int dstarg;
} irq_cmd_t;
 
typedef struct {
unsigned int cmdcount;
irq_cmd_t *cmds;
} irq_code_t;
 
#ifdef KERNEL
 
#include <ipc/ipc.h>
#include <typedefs.h>
#include <ddi/irq.h>
#include <arch/types.h>
#include <adt/list.h>
 
/** IPC notification config structure.
*
* Primarily, this structure is encapsulated in the irq_t structure.
* It is protected by irq_t::lock.
*/
struct ipc_notif_cfg {
bool notify; /**< When false, notifications are not sent. */
answerbox_t *answerbox; /**< Answerbox for notifications. */
unative_t method; /**< Method to be used for the notification. */
irq_code_t *code; /**< Top-half pseudocode. */
count_t counter; /**< Counter. */
link_t link; /**< Link between IRQs that are notifying the
same answerbox. The list is protected by
the answerbox irq_lock. */
};
 
extern int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno, unative_t method,
irq_code_t *ucode);
extern void ipc_irq_send_notif(irq_t *irq);
98,7 → 52,5
 
#endif
 
#endif
 
/** @}
*/