HelenOS Subversion repository

Compare revisions: Rev 2105 → Rev 2106

/trunk/kernel/genarch/include/mm/as_ht.h
36,43 → 36,17
#define KERN_AS_HT_H_
 
#include <mm/mm.h>
#include <arch/mm/asid.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <synch/mutex.h>
#include <arch/types.h>
 
/** Address space structure.
*
* as_t contains the list of as_areas of userspace-accessible
* pages for one or more tasks. Ranges of kernel memory pages are not
* supposed to figure in the list as they are shared by all tasks and
* set up during system initialization.
*/
typedef struct {
/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
} as_genarch_t;
 
mutex_t lock;
struct as;
 
/** Number of references (i.e. tasks that reference this as). */
count_t refcount;
 
/** Number of processors on which this address space is active. */
count_t cpu_refcount;
 
/** B+tree of address space areas. */
btree_t as_area_btree;
 
/** Address space identifier. Constant on architectures that do not support ASIDs.*/
asid_t asid;
/** Architecture specific content. */
as_arch_t arch;
} as_t;
 
typedef struct {
typedef struct pte {
link_t link; /**< Page hash table link. */
as_t *as; /**< Address space. */
struct as *as; /**< Address space. */
uintptr_t page; /**< Virtual memory page. */
uintptr_t frame; /**< Physical memory frame. */
unsigned g : 1; /**< Global page. */
/trunk/kernel/genarch/include/mm/as_pt.h
36,45 → 36,15
#define KERN_AS_PT_H_
 
#include <mm/mm.h>
#include <arch/mm/asid.h>
#include <adt/list.h>
#include <adt/btree.h>
#include <synch/mutex.h>
#include <arch/types.h>
 
#define AS_PAGE_TABLE
 
/** Address space structure.
*
* as_t contains the list of as_areas of userspace-accessible
* pages for one or more tasks. Ranges of kernel memory pages are not
* supposed to figure in the list as they are shared by all tasks and
* set up during system initialization.
*/
typedef struct {
/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
 
mutex_t lock;
 
/** Number of references (i.e. tasks that reference this as). */
count_t refcount;
 
/** Number of processors on which this address space is active. */
count_t cpu_refcount;
 
/** B+tree of address space areas. */
btree_t as_area_btree;
/** Page table pointer. */
pte_t *page_table;
} as_genarch_t;
 
/** Address space identifier. Constant on architectures that do not support ASIDs.*/
asid_t asid;
/** Architecture specific content. */
as_arch_t arch;
} as_t;
 
#endif
 
/** @}
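Taken together, the as_ht.h and as_pt.h hunks above split the old monolithic as_t: the generic fields move to generic/include/mm/as.h (see its hunk below), while each genarch flavour contributes an as_genarch_t that as_t embeds as its genarch member. The page-table flavour defines AS_PAGE_TABLE and keeps the PTL0 pointer there; the hash-table flavour has no per-as page table. A condensed sketch reconstructed from the hunks (field lists abbreviated):

/* genarch/include/mm/as_pt.h -- page table flavour (sketch) */
#define AS_PAGE_TABLE
typedef struct {
    /** Page table pointer. */
    pte_t *page_table;
} as_genarch_t;

/* generic/include/mm/as.h -- the generic part embeds it */
typedef struct as {
    /* ... lock, refcount, cpu_refcount, as_area_btree, asid ... */
    /** Non-generic content. */
    as_genarch_t genarch;
    /** Architecture specific content. */
    as_arch_t arch;
} as_t;

Call sites accordingly change from as->page_table to as->genarch.page_table, with #ifdef AS_PAGE_TABLE guards wherever the hash-table flavour must also compile; the as.c, page_pt.c and arch hunks below are exactly this mechanical substitution.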
/trunk/kernel/genarch/include/mm/page_ht.h
59,8 → 59,6
#define PTE_WRITABLE(pte) ((pte)->w != 0)
#define PTE_EXECUTABLE(pte) ((pte)->x != 0)
 
#define SET_PTL0_ADDRESS(x)
 
extern as_operations_t as_ht_operations;
extern page_mapping_operations_t ht_mapping_operations;
 
/trunk/kernel/genarch/src/mm/page_pt.c
72,7 → 72,7
pte_t *ptl0, *ptl1, *ptl2, *ptl3;
pte_t *newpt;
 
ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);
ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
 
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT) {
newpt = (pte_t *)frame_alloc(ONE_FRAME, FRAME_KA);
128,7 → 128,7
* First, remove the mapping, if it exists.
*/
 
ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);
ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
 
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
return;
244,7 → 244,7
{
pte_t *ptl0, *ptl1, *ptl2, *ptl3;
 
ptl0 = (pte_t *) PA2KA((uintptr_t) as->page_table);
ptl0 = (pte_t *) PA2KA((uintptr_t) as->genarch.page_table);
 
if (GET_PTL1_FLAGS(ptl0, PTL0_INDEX(page)) & PAGE_NOT_PRESENT)
return NULL;
/trunk/kernel/genarch/src/mm/as_pt.c
85,7 → 85,7
ipl = interrupts_disable();
mutex_lock(&AS_KERNEL->lock);
src_ptl0 = (pte_t *) PA2KA((uintptr_t) AS_KERNEL->page_table);
src_ptl0 = (pte_t *) PA2KA((uintptr_t) AS_KERNEL->genarch.page_table);
 
src = (uintptr_t) &src_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
dst = (uintptr_t) &dst_ptl0[PTL0_INDEX(KERNEL_ADDRESS_SPACE_START)];
/trunk/kernel/generic/include/time/timeout.h
44,14 → 44,16
typedef struct {
SPINLOCK_DECLARE(lock);
 
link_t link; /**< Link to the list of active timeouts on THE->cpu */
uint64_t ticks; /**< Timeout will be activated in this amount of clock() ticks. */
 
timeout_handler_t handler; /**< Function that will be called on timeout activation. */
void *arg; /**< Argument to be passed to handler() function. */
cpu_t *cpu; /**< On which processor is this timeout registered. */
/** Link to the list of active timeouts on THE->cpu */
link_t link;
/** Timeout will be activated in this amount of clock() ticks. */
uint64_t ticks;
/** Function that will be called on timeout activation. */
timeout_handler_t handler;
/** Argument to be passed to handler() function. */
void *arg;
/** On which processor is this timeout registered. */
cpu_t *cpu;
} timeout_t;
 
#define us2ticks(us) ((uint64_t) (((uint32_t) (us) / (1000000 / HZ))))
59,7 → 61,8
extern void timeout_init(void);
extern void timeout_initialize(timeout_t *t);
extern void timeout_reinitialize(timeout_t *t);
extern void timeout_register(timeout_t *t, uint64_t usec, timeout_handler_t f, void *arg);
extern void timeout_register(timeout_t *t, uint64_t usec, timeout_handler_t f,
void *arg);
extern bool timeout_unregister(timeout_t *t);
 
#endif
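A minimal usage sketch of this interface; timeout_handler_t is assumed to be void (*)(void *) to match the handler field above, and the meaning of timeout_unregister()'s boolean return is likewise an assumption:

static void wakeup_fn(void *arg)
{
    /* Runs from clock() context once the timeout expires. */
}

static void timeout_example(void)
{
    timeout_t t;

    timeout_initialize(&t);
    /* Arrange for wakeup_fn(NULL) to fire in roughly 100000 us. */
    timeout_register(&t, 100000, wakeup_fn, NULL);

    if (!timeout_unregister(&t)) {
        /* Assumed: false means the handler already fired (or is
         * firing) and could not be cancelled. */
    }
}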
/trunk/kernel/generic/include/proc/task.h
58,32 → 58,42
typedef struct task {
/** Task lock.
*
* Must be acquired before threads_lock and thread lock of any of its threads.
* Must be acquired before threads_lock and thread lock of any of its
* threads.
*/
SPINLOCK_DECLARE(lock);
char *name;
struct thread *main_thread; /**< Pointer to the main thread. */
link_t th_head; /**< List of threads contained in this task. */
as_t *as; /**< Address space. */
task_id_t taskid; /**< Unique identity of task */
context_id_t context; /**< Task security context */
/** Pointer to the main thread. */
struct thread *main_thread;
/** List of threads contained in this task. */
link_t th_head;
/** Address space. */
as_t *as;
/** Unique identity of task. */
task_id_t taskid;
/** Task security context. */
context_id_t context;
 
/** If this is true, new threads can become part of the task. */
bool accept_new_threads;
/** Number of references (i.e. threads). */
count_t refcount;
 
count_t refcount; /**< Number of references (i.e. threads). */
/** Task capabilities. */
cap_t capabilities;
 
cap_t capabilities; /**< Task capabilities. */
 
/* IPC stuff */
answerbox_t answerbox; /**< Communication endpoint */
phone_t phones[IPC_MAX_PHONES];
atomic_t active_calls; /**< Active asynchronous messages.
* It is used for limiting uspace to
* certain extent. */
/**
* Active asynchronous messages. It is used for limiting uspace to a
* certain extent.
*/
atomic_t active_calls;
task_arch_t arch; /**< Architecture specific task data. */
/** Architecture specific task data. */
task_arch_t arch;
/**
* Serializes access to the B+tree of task's futexes. This mutex is
90,9 → 100,11
* independent on the task spinlock.
*/
mutex_t futexes_lock;
btree_t futexes; /**< B+tree of futexes referenced by this task. */
/** B+tree of futexes referenced by this task. */
btree_t futexes;
uint64_t cycles; /**< Accumulated accounting. */
/** Accumulated accounting. */
uint64_t cycles;
} task_t;
 
SPINLOCK_EXTERN(tasks_lock);
/trunk/kernel/generic/include/proc/thread.h
52,19 → 52,30
extern char *thread_states[];
 
/* Thread flags */
#define THREAD_FLAG_WIRED (1 << 0) /**< Thread cannot be migrated to another CPU. */
#define THREAD_FLAG_STOLEN (1 << 1) /**< Thread was migrated to another CPU and has not run yet. */
#define THREAD_FLAG_USPACE (1 << 2) /**< Thread executes in userspace. */
 
/** Thread cannot be migrated to another CPU. */
#define THREAD_FLAG_WIRED (1 << 0)
/** Thread was migrated to another CPU and has not run yet. */
#define THREAD_FLAG_STOLEN (1 << 1)
/** Thread executes in userspace. */
#define THREAD_FLAG_USPACE (1 << 2)
 
/** Thread states. */
typedef enum {
Invalid, /**< It is an error, if thread is found in this state. */
Running, /**< State of a thread that is currently executing on some CPU. */
Sleeping, /**< Thread in this state is waiting for an event. */
Ready, /**< State of threads in a run queue. */
Entering, /**< Threads are in this state before they are first readied. */
Exiting, /**< After a thread calls thread_exit(), it is put into Exiting state. */
Undead /**< Threads that were not detached but exited are in the Undead state. */
/** It is an error if a thread is found in this state. */
Invalid,
/** State of a thread that is currently executing on some CPU. */
Running,
/** Thread in this state is waiting for an event. */
Sleeping,
/** State of threads in a run queue. */
Ready,
/** Threads are in this state before they are first readied. */
Entering,
/** After a thread calls thread_exit(), it is put into Exiting state. */
Exiting,
/** Threads that were not detached but exited are in the Undead state. */
Undead
} state_t;
 
/** Join types. */
76,9 → 87,9
 
/** Thread structure. There is one per thread. */
typedef struct thread {
link_t rq_link; /**< Run queue link. */
link_t wq_link; /**< Wait queue link. */
link_t th_link; /**< Links to threads within containing task. */
/** Lock protecting thread structure.
*
88,35 → 99,59
 
char name[THREAD_NAME_BUFLEN];
 
void (* thread_code)(void *); /**< Function implementing the thread. */
void *thread_arg; /**< Argument passed to thread_code() function. */
/** Function implementing the thread. */
void (* thread_code)(void *);
/** Argument passed to thread_code() function. */
void *thread_arg;
 
/** From here, the stored context is restored when the thread is scheduled. */
/**
* From here, the stored context is restored when the thread is
* scheduled.
*/
context_t saved_context;
/** From here, the stored timeout context is restored when sleep times out. */
/**
* From here, the stored timeout context is restored when sleep times
* out.
*/
context_t sleep_timeout_context;
/** From here, the stored interruption context is restored when sleep is interrupted. */
/**
* From here, the stored interruption context is restored when sleep is
* interrupted.
*/
context_t sleep_interruption_context;
 
bool sleep_interruptible; /**< If true, the thread can be interrupted from sleep. */
waitq_t *sleep_queue; /**< Wait queue in which this thread sleeps. */
timeout_t sleep_timeout; /**< Timeout used for timeoutable sleeping. */
volatile int timeout_pending; /**< Flag signalling sleep timeout in progress. */
/** If true, the thread can be interrupted from sleep. */
bool sleep_interruptible;
/** Wait queue in which this thread sleeps. */
waitq_t *sleep_queue;
/** Timeout used for timeoutable sleeping. */
timeout_t sleep_timeout;
/** Flag signalling sleep timeout in progress. */
volatile int timeout_pending;
 
/** True if this thread is executing copy_from_uspace(). False otherwise. */
/**
* True if this thread is executing copy_from_uspace().
* False otherwise.
*/
bool in_copy_from_uspace;
/** True if this thread is executing copy_to_uspace(). False otherwise. */
/**
* True if this thread is executing copy_to_uspace().
* False otherwise.
*/
bool in_copy_to_uspace;
/**
* If true, the thread will not go to sleep at all and will
* call thread_exit() before returning to userspace.
* If true, the thread will not go to sleep at all and will call
* thread_exit() before returning to userspace.
*/
bool interrupted;
thread_join_type_t join_type; /**< Who joins the thread. */
bool detached; /**< If true, thread_join_timeout() cannot be used on this thread. */
waitq_t join_wq; /**< Waitq for thread_join_timeout(). */
/** Who joins the thread. */
thread_join_type_t join_type;
/** If true, thread_join_timeout() cannot be used on this thread. */
bool detached;
/** Waitq for thread_join_timeout(). */
waitq_t join_wq;
 
fpu_context_t *saved_fpu_context;
int fpu_context_exists;
123,34 → 158,48
 
/*
* Defined only if thread doesn't run.
* It means that fpu context is in CPU that last time executes this thread.
* This disables migration.
* It means that the fpu context is in the CPU that last executed this
* thread. This disables migration.
*/
int fpu_context_engaged;
 
rwlock_type_t rwlock_holder_type;
 
void (* call_me)(void *); /**< Function to be called in scheduler before the thread is put to sleep. */
void *call_me_with; /**< Argument passed to call_me(). */
/** Callback fired in scheduler before the thread is put to sleep. */
void (* call_me)(void *);
/** Argument passed to call_me(). */
void *call_me_with;
 
state_t state; /**< Thread's state. */
int flags; /**< Thread's flags. */
/** Thread's state. */
state_t state;
/** Thread's flags. */
int flags;
cpu_t *cpu; /**< Thread's CPU. */
task_t *task; /**< Containing task. */
/** Thread's CPU. */
cpu_t *cpu;
/** Containing task. */
task_t *task;
 
uint64_t ticks; /**< Ticks before preemption. */
/** Ticks before preemption. */
uint64_t ticks;
uint64_t cycles; /**< Thread accounting. */
uint64_t last_cycle; /**< Last sampled cycle. */
bool uncounted; /**< Thread doesn't affect accumulated accounting. */
/** Thread accounting. */
uint64_t cycles;
/** Last sampled cycle. */
uint64_t last_cycle;
/** Thread doesn't affect accumulated accounting. */
bool uncounted;
 
int priority; /**< Thread's priority. Implemented as index to CPU->rq */
uint32_t tid; /**< Thread ID. */
/** Thread's priority. Implemented as index to CPU->rq */
int priority;
/** Thread ID. */
uint32_t tid;
thread_arch_t arch; /**< Architecture-specific data. */
/** Architecture-specific data. */
thread_arch_t arch;
 
uint8_t *kstack; /**< Thread's kernel stack. */
/** Thread's kernel stack. */
uint8_t *kstack;
} thread_t;
 
/** Thread list lock.
161,10 → 210,12
*/
SPINLOCK_EXTERN(threads_lock);
 
extern btree_t threads_btree; /**< B+tree containing all threads. */
/** B+tree containing all threads. */
extern btree_t threads_btree;
 
extern void thread_init(void);
extern thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name, bool uncounted);
extern thread_t *thread_create(void (* func)(void *), void *arg, task_t *task,
int flags, char *name, bool uncounted);
extern void thread_ready(thread_t *t);
extern void thread_exit(void) __attribute__((noreturn));
 
181,11 → 232,13
extern void thread_sleep(uint32_t sec);
extern void thread_usleep(uint32_t usec);
 
#define thread_join(t) thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
#define thread_join(t) \
thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
extern int thread_join_timeout(thread_t *t, uint32_t usec, int flags);
extern void thread_detach(thread_t *t);
 
extern void thread_register_call_me(void (* call_me)(void *), void *call_me_with);
extern void thread_register_call_me(void (* call_me)(void *),
void *call_me_with);
extern void thread_print_list(void);
extern void thread_destroy(thread_t *t);
extern void thread_update_accounting(void);
192,10 → 245,10
extern bool thread_exists(thread_t *t);
extern void thread_interrupt_sleep(thread_t *t);
 
/* Fpu context slab cache */
/** Fpu context slab cache. */
extern slab_cache_t *fpu_context_slab;
 
/** Thread syscall prototypes. */
/* Thread syscall prototypes. */
unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name);
unative_t sys_thread_exit(int uspace_status);
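A sketch of the lifecycle these prototypes imply; the worker body is illustrative, and detaching after a successful join (to let the Undead thread be reclaimed) is an assumption:

static void worker(void *arg)
{
    /* ... do the work ... */
    thread_exit();          /* enters the Exiting state */
}

static void spawn_and_wait(task_t *task)
{
    thread_t *t;

    t = thread_create(worker, NULL, task, 0, "worker", false);
    if (t) {
        thread_ready(t);    /* put it on a run queue */
        thread_join(t);     /* sleep until it exits (Undead) */
        thread_detach(t);   /* assumed: releases the Undead thread */
    }
}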
 
/trunk/kernel/generic/include/synch/futex.h
42,14 → 42,19
 
/** Kernel-side futex structure. */
typedef struct {
uintptr_t paddr; /**< Physical address of the status variable. */
waitq_t wq; /**< Wait queue for threads waiting for futex availability. */
link_t ht_link; /**< Futex hash table link. */
count_t refcount; /**< Number of tasks that reference this futex. */
/** Physical address of the status variable. */
uintptr_t paddr;
/** Wait queue for threads waiting for futex availability. */
waitq_t wq;
/** Futex hash table link. */
link_t ht_link;
/** Number of tasks that reference this futex. */
count_t refcount;
} futex_t;
 
extern void futex_init(void);
extern unative_t sys_futex_sleep_timeout(uintptr_t uaddr, uint32_t usec, int flags);
extern unative_t sys_futex_sleep_timeout(uintptr_t uaddr, uint32_t usec,
int flags);
extern unative_t sys_futex_wakeup(uintptr_t uaddr);
 
extern void futex_cleanup(void);
/trunk/kernel/generic/include/synch/condvar.h
44,15 → 44,16
waitq_t wq;
} condvar_t;
 
#define condvar_wait(cv,mtx) \
_condvar_wait_timeout((cv),(mtx),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
#define condvar_wait_timeout(cv,mtx,usec) \
_condvar_wait_timeout((cv),(mtx),(usec),SYNCH_FLAGS_NONE)
#define condvar_wait(cv, mtx) \
_condvar_wait_timeout((cv), (mtx), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
#define condvar_wait_timeout(cv, mtx, usec) \
_condvar_wait_timeout((cv), (mtx), (usec), SYNCH_FLAGS_NONE)
 
extern void condvar_initialize(condvar_t *cv);
extern void condvar_signal(condvar_t *cv);
extern void condvar_broadcast(condvar_t *cv);
extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec, int flags);
extern int _condvar_wait_timeout(condvar_t *cv, mutex_t *mtx, uint32_t usec,
int flags);
 
#endif
 
/trunk/kernel/generic/include/synch/rwlock.h
48,22 → 48,28
 
typedef struct {
SPINLOCK_DECLARE(lock);
mutex_t exclusive; /**< Mutex for writers, readers can bypass it if readers_in is positive. */
count_t readers_in; /**< Number of readers in critical section. */
/**
* Mutex for writers, readers can bypass it if readers_in is positive.
*/
mutex_t exclusive;
/** Number of readers in critical section. */
count_t readers_in;
} rwlock_t;
 
#define rwlock_write_lock(rwl) \
_rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
_rwlock_write_lock_timeout((rwl), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
#define rwlock_read_lock(rwl) \
_rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NONE)
_rwlock_read_lock_timeout((rwl), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
#define rwlock_write_trylock(rwl) \
_rwlock_write_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
_rwlock_write_lock_timeout((rwl), SYNCH_NO_TIMEOUT, \
SYNCH_FLAGS_NON_BLOCKING)
#define rwlock_read_trylock(rwl) \
_rwlock_read_lock_timeout((rwl),SYNCH_NO_TIMEOUT,SYNCH_FLAGS_NON_BLOCKING)
#define rwlock_write_lock_timeout(rwl,usec) \
_rwlock_write_lock_timeout((rwl),(usec),SYNCH_FLAGS_NONE)
#define rwlock_read_lock_timeout(rwl,usec) \
_rwlock_read_lock_timeout((rwl),(usec),SYNCH_FLAGS_NONE)
_rwlock_read_lock_timeout((rwl), SYNCH_NO_TIMEOUT, \
SYNCH_FLAGS_NON_BLOCKING)
#define rwlock_write_lock_timeout(rwl, usec) \
_rwlock_write_lock_timeout((rwl), (usec), SYNCH_FLAGS_NONE)
#define rwlock_read_lock_timeout(rwl, usec) \
_rwlock_read_lock_timeout((rwl), (usec), SYNCH_FLAGS_NONE)
 
extern void rwlock_initialize(rwlock_t *rwl);
extern void rwlock_read_unlock(rwlock_t *rwl);
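Read-side and write-side usage implied by these macros — a sketch; rwlock_write_unlock() and the ESYNCH-style return code of the _timeout variants (tested with SYNCH_OK() from synch.h) are assumptions based on the visible declarations:

static rwlock_t rwl;

void rwlock_example(void)
{
    rwlock_initialize(&rwl);

    rwlock_read_lock(&rwl);
    /* ... any number of readers may run here concurrently ... */
    rwlock_read_unlock(&rwl);

    /* Give up after 10000 us instead of blocking indefinitely. */
    if (SYNCH_OK(rwlock_write_lock_timeout(&rwl, 10000))) {
        /* ... exclusive access ... */
        rwlock_write_unlock(&rwl);  /* assumed counterpart */
    }
}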
/trunk/kernel/generic/include/synch/semaphore.h
46,7 → 46,7
#define semaphore_down(s) \
_semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
#define semaphore_trydown(s) \
_semaphore_down_timeout((s), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NON_BLOCKING)
#define semaphore_down_timeout(s, usec) \
_semaphore_down_timeout((s), (usec), SYNCH_FLAGS_NONE)
 
/trunk/kernel/generic/include/synch/synch.h
35,20 → 35,31
#ifndef KERN_SYNCH_H_
#define KERN_SYNCH_H_
 
#define SYNCH_NO_TIMEOUT 0 /**< Request with no timeout. */
/** Request with no timeout. */
#define SYNCH_NO_TIMEOUT 0
 
#define SYNCH_FLAGS_NONE 0 /**< No flags specified. */
#define SYNCH_FLAGS_NON_BLOCKING (1<<0) /**< Non-blocking operation request. */
#define SYNCH_FLAGS_INTERRUPTIBLE (1<<1) /**< Interruptible operation. */
/** No flags specified. */
#define SYNCH_FLAGS_NONE 0
/** Non-blocking operation request. */
#define SYNCH_FLAGS_NON_BLOCKING (1 << 0)
/** Interruptible operation. */
#define SYNCH_FLAGS_INTERRUPTIBLE (1 << 1)
 
#define ESYNCH_WOULD_BLOCK 1 /**< Could not satisfy the request without going to sleep. */
#define ESYNCH_TIMEOUT 2 /**< Timeout occurred. */
#define ESYNCH_INTERRUPTED 4 /**< Sleep was interrupted. */
#define ESYNCH_OK_ATOMIC 8 /**< Operation succeeded without sleeping. */
#define ESYNCH_OK_BLOCKED 16 /**< Operation succeeded and did sleep. */
/** Could not satisfy the request without going to sleep. */
#define ESYNCH_WOULD_BLOCK 1
/** Timeout occurred. */
#define ESYNCH_TIMEOUT 2
/** Sleep was interrupted. */
#define ESYNCH_INTERRUPTED 4
/** Operation succeeded without sleeping. */
#define ESYNCH_OK_ATOMIC 8
/** Operation succeeded and did sleep. */
#define ESYNCH_OK_BLOCKED 16
 
#define SYNCH_FAILED(rc) ((rc) & (ESYNCH_WOULD_BLOCK | ESYNCH_TIMEOUT | ESYNCH_INTERRUPTED))
#define SYNCH_OK(rc) ((rc) & (ESYNCH_OK_ATOMIC | ESYNCH_OK_BLOCKED))
#define SYNCH_FAILED(rc) \
((rc) & (ESYNCH_WOULD_BLOCK | ESYNCH_TIMEOUT | ESYNCH_INTERRUPTED))
#define SYNCH_OK(rc) \
((rc) & (ESYNCH_OK_ATOMIC | ESYNCH_OK_BLOCKED))
 
#endif
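These codes compose with the _timeout waiting primitives, e.g. _condvar_wait_timeout() above — a sketch assuming cv and mtx are initialized:

int rc = _condvar_wait_timeout(&cv, &mtx, 10000, SYNCH_FLAGS_INTERRUPTIBLE);

if (SYNCH_FAILED(rc)) {
    if (rc == ESYNCH_TIMEOUT) {
        /* the 10000 us budget ran out */
    } else if (rc == ESYNCH_INTERRUPTED) {
        /* the sleep was interrupted */
    }
} else {
    /* ESYNCH_OK_ATOMIC or ESYNCH_OK_BLOCKED: the wait succeeded,
     * with or without actually sleeping */
}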
 
/trunk/kernel/generic/include/synch/waitq.h
52,8 → 52,12
*/
SPINLOCK_DECLARE(lock);
 
int missed_wakeups; /**< Number of waitq_wakeup() calls that didn't find a thread to wake up. */
link_t head; /**< List of sleeping threads for which there was no missed_wakeup. */
/**
* Number of waitq_wakeup() calls that didn't find a thread to wake up.
*/
int missed_wakeups;
/** List of sleeping threads for which there was no missed_wakeup. */
link_t head;
} waitq_t;
 
#define waitq_sleep(wq) \
/trunk/kernel/generic/include/ddi/ddi_arg.h
37,18 → 37,23
 
/** Structure encapsulating arguments for SYS_PHYSMEM_MAP syscall. */
typedef struct {
unsigned long long task_id; /** ID of the destination task. */
void *phys_base; /** Physical address of starting frame. */
void *virt_base; /** Virtual address of starting page. */
unsigned long pages; /** Number of pages to map. */
int flags; /** Address space area flags for the mapping. */
/** ID of the destination task. */
unsigned long long task_id;
/** Physical address of starting frame. */
void *phys_base;
/** Virtual address of starting page. */
void *virt_base;
/** Number of pages to map. */
unsigned long pages;
/** Address space area flags for the mapping. */
int flags;
} ddi_memarg_t;
 
/** Structure encapsulating arguments for SYS_ENABLE_IOSPACE syscall. */
typedef struct {
unsigned long long task_id; /** ID of the destination task. */
void *ioaddr; /** Starting I/O space address. */
unsigned long size; /** Number of bytes. */
unsigned long long task_id; /**< ID of the destination task. */
void *ioaddr; /**< Starting I/O space address. */
unsigned long size; /**< Number of bytes. */
} ddi_ioarg_t;
 
#endif
/trunk/kernel/generic/include/ddi/irq.h
91,21 → 91,27
* It is protected by irq_t::lock.
*/
typedef struct {
bool notify; /**< When false, notifications are not sent. */
answerbox_t *answerbox; /**< Answerbox for notifications. */
unative_t method; /**< Method to be used for the notification. */
irq_code_t *code; /**< Top-half pseudocode. */
count_t counter; /**< Counter. */
link_t link; /**< Link between IRQs that are notifying the
same answerbox. The list is protected by
the answerbox irq_lock. */
/** When false, notifications are not sent. */
bool notify;
/** Answerbox for notifications. */
answerbox_t *answerbox;
/** Method to be used for the notification. */
unative_t method;
/** Top-half pseudocode. */
irq_code_t *code;
/** Counter. */
count_t counter;
/**
* Link between IRQs that are notifying the same answerbox. The list is
* protected by the answerbox irq_lock.
*/
link_t link;
} ipc_notif_cfg_t;
 
/** Structure representing one device IRQ.
*
* If one device has multiple interrupts, there will
* be multiple irq_t instantiations with the same
* devno.
* If one device has multiple interrupts, there will be multiple irq_t
* instantiations with the same devno.
*/
typedef struct irq {
/** Hash table link. */
/trunk/kernel/generic/include/console/chardev.h
45,10 → 45,13
 
/* Character device operations interface. */
typedef struct {
void (* suspend)(struct chardev *); /**< Suspend pushing characters. */
void (* resume)(struct chardev *); /**< Resume pushing characters. */
void (* write)(struct chardev *, char c); /**< Write character to stream. */
/** Read character directly from device, assume interrupts disabled */
/** Suspend pushing characters. */
void (* suspend)(struct chardev *);
/** Resume pushing characters. */
void (* resume)(struct chardev *);
/** Write character to stream. */
void (* write)(struct chardev *, char c);
/** Read character directly from device, assume interrupts disabled. */
char (* read)(struct chardev *);
} chardev_operations_t;
 
57,17 → 60,18
char *name;
waitq_t wq;
SPINLOCK_DECLARE(lock); /**< Protects everything below. */
/** Protects everything below. */
SPINLOCK_DECLARE(lock);
uint8_t buffer[CHARDEV_BUFLEN];
count_t counter;
chardev_operations_t *op; /**< Implementation of chardev operations. */
/** Implementation of chardev operations. */
chardev_operations_t *op;
index_t index;
void *data;
} chardev_t;
 
extern void chardev_initialize(char *name,
chardev_t *chardev,
chardev_operations_t *op);
extern void chardev_initialize(char *name, chardev_t *chardev,
chardev_operations_t *op);
extern void chardev_push_character(chardev_t *chardev, uint8_t ch);
 
#endif /* KERN_CHARDEV_H_ */
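Wiring a driver into this interface — a sketch; the handler body is illustrative and leaving unused callbacks NULL is an assumption:

static void mydev_write(struct chardev *dev, char c)
{
    /* push c to the hardware (illustrative) */
}

static chardev_operations_t mydev_ops = {
    .write = mydev_write
    /* .suspend, .resume, .read left NULL -- assumed tolerated */
};

static chardev_t mydev;

void mydev_init(void)
{
    chardev_initialize("mydev", &mydev, &mydev_ops);
    /* An interrupt handler would feed received bytes to readers: */
    chardev_push_character(&mydev, 'x');
}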
/trunk/kernel/generic/include/console/kconsole.h
45,28 → 45,42
ARG_TYPE_INVALID = 0,
ARG_TYPE_INT,
ARG_TYPE_STRING,
ARG_TYPE_VAR /**< Variable type - either symbol or string */
/** Variable type - either symbol or string. */
ARG_TYPE_VAR
} cmd_arg_type_t;
 
/** Structure representing one argument of kconsole command line. */
typedef struct {
cmd_arg_type_t type; /**< Type descriptor. */
void *buffer; /**< Buffer where to store data. */
size_t len; /**< Size of the buffer. */
unative_t intval; /**< Integer value */
cmd_arg_type_t vartype; /**< Resulting type of variable arg */
/** Type descriptor. */
cmd_arg_type_t type;
/** Buffer where to store data. */
void *buffer;
/** Size of the buffer. */
size_t len;
/** Integer value. */
unative_t intval;
/** Resulting type of variable arg. */
cmd_arg_type_t vartype;
} cmd_arg_t;
 
/** Structure representing one kconsole command. */
typedef struct {
link_t link; /**< Command list link. */
SPINLOCK_DECLARE(lock); /**< This lock protects everything below. */
const char *name; /**< Command name. */
const char *description; /**< Textual description. */
int (* func)(cmd_arg_t *); /**< Function implementing the command. */
count_t argc; /**< Number of arguments. */
cmd_arg_t *argv; /**< Argument vector. */
void (* help)(void); /**< Function for printing detailed help. */
/** Command list link. */
link_t link;
/** This lock protects everything below. */
SPINLOCK_DECLARE(lock);
/** Command name. */
const char *name;
/** Textual description. */
const char *description;
/** Function implementing the command. */
int (* func)(cmd_arg_t *);
/** Number of arguments. */
count_t argc;
/** Argument vector. */
cmd_arg_t *argv;
/** Function for printing detailed help. */
void (* help)(void);
} cmd_info_t;
 
SPINLOCK_EXTERN(cmd_lock);
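Defining a command against this structure — a sketch; the return convention of func and the existence of a cmd_register()-style registration call (protected by cmd_lock, but outside this hunk) are assumptions:

static int cmd_hello(cmd_arg_t *argv)
{
    printf("hello\n");
    return 1;       /* assumed: nonzero reports success */
}

static cmd_info_t hello_info = {
    .name = "hello",
    .description = "Print a greeting.",
    .func = cmd_hello,
    .argc = 0       /* no arguments, so .argv stays NULL */
};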
/trunk/kernel/generic/include/adt/hash_table.h
42,7 → 42,8
typedef struct {
/** Hash function.
*
* @param key Array of keys needed to compute hash index. All keys must be passed.
* @param key Array of keys needed to compute hash index. All keys must
* be passed.
*
* @return Index into hash table.
*/
50,7 → 51,8
/** Hash table item comparison function.
*
* @param key Array of keys that will be compared with item. It is not necessary to pass all keys.
* @param key Array of keys that will be compared with item. It is not
* necessary to pass all keys.
*
* @return true if the keys match, false otherwise.
*/
71,9 → 73,11
hash_table_operations_t *op;
} hash_table_t;
 
#define hash_table_get_instance(item, type, member) list_get_instance((item), type, member)
#define hash_table_get_instance(item, type, member) \
list_get_instance((item), type, member)
 
extern void hash_table_create(hash_table_t *h, count_t m, count_t max_keys, hash_table_operations_t *op);
extern void hash_table_create(hash_table_t *h, count_t m, count_t max_keys,
hash_table_operations_t *op);
extern void hash_table_insert(hash_table_t *h, unative_t key[], link_t *item);
extern link_t *hash_table_find(hash_table_t *h, unative_t key[]);
extern void hash_table_remove(hash_table_t *h, unative_t key[], count_t keys);
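The operations structure is filled with callbacks along these lines — a sketch; the exact member signatures (index_t return for hash, the count_t keys parameter of compare) are extrapolated from the doc comments above and are assumptions:

typedef struct {
    unative_t id;
    link_t link;
} my_item_t;

static index_t my_hash(unative_t key[])
{
    return key[0] % 97;     /* 97 = bucket count used below */
}

static int my_compare(unative_t key[], count_t keys, link_t *item)
{
    my_item_t *it = hash_table_get_instance(item, my_item_t, link);
    return it->id == key[0];
}

static hash_table_operations_t my_ops = {
    .hash = my_hash,
    .compare = my_compare
};

void hash_example(void)
{
    hash_table_t h;
    static my_item_t item = { .id = 42 };
    unative_t key[] = { 42 };

    hash_table_create(&h, 97, 1, &my_ops);
    hash_table_insert(&h, key, &item.link);
    link_t *found = hash_table_find(&h, key);   /* == &item.link */
}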
/trunk/kernel/generic/include/adt/list.h
47,7 → 47,8
*
* @param name Name of the new statically allocated list.
*/
#define LIST_INITIALIZE(name) link_t name = { .prev = &name, .next = &name }
#define LIST_INITIALIZE(name) \
link_t name = { .prev = &name, .next = &name }
 
/** Initialize doubly-linked circular list link
*
107,7 → 108,8
*
* Remove item from doubly-linked circular list.
*
* @param link Pointer to link_t structure to be removed from the list it is contained in.
* @param link Pointer to link_t structure to be removed from the list it is
* contained in.
*/
static inline void list_remove(link_t *link)
{
135,8 → 137,10
* Note that the algorithm works both directions:
* concatenates split lists and splits concatenated lists.
*
* @param part1 Pointer to link_t structure leading the first (half of the headless) list.
* @param part2 Pointer to link_t structure leading the second (half of the headless) list.
* @param part1 Pointer to link_t structure leading the first (half of the
* headless) list.
* @param part2 Pointer to link_t structure leading the second (half of the
* headless) list.
*/
static inline void headless_list_split_or_concat(link_t *part1, link_t *part2)
{
154,8 → 158,10
*
* Split headless doubly-linked circular list.
*
* @param part1 Pointer to link_t structure leading the first half of the headless list.
* @param part2 Pointer to link_t structure leading the second half of the headless list.
* @param part1 Pointer to link_t structure leading the first half of the
* headless list.
* @param part2 Pointer to link_t structure leading the second half of the
* headless list.
*/
static inline void headless_list_split(link_t *part1, link_t *part2)
{
167,7 → 173,7
* Concatenate two headless doubly-linked circular lists.
*
* @param part1 Pointer to link_t structure leading the first headless list.
* @param part2 Pointer to link_t structure leading the second headless list.
* @param part2 Pointer to link_t structure leading the second headless list.
*/
static inline void headless_list_concat(link_t *part1, link_t *part2)
{
174,7 → 180,8
headless_list_split_or_concat(part1, part2);
}
 
#define list_get_instance(link,type,member) (type *)(((uint8_t*)(link))-((uint8_t*)&(((type *)NULL)->member)))
#define list_get_instance(link,type,member) \
((type *)(((uint8_t *)(link)) - ((uint8_t *)&(((type *)NULL)->member))))
 
extern bool list_member(const link_t *link, const link_t *head);
extern void list_concat(link_t *head1, link_t *head2);
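The rewrapped list_get_instance() is the classic container_of pattern: subtract the member's offset within the type from the member's address to recover the enclosing object. A self-contained hosted-C demonstration (names are illustrative):

#include <stdint.h>
#include <stdio.h>

typedef struct link {
    struct link *prev, *next;
} link_t;

#define list_get_instance(link, type, member) \
    ((type *)(((uint8_t *)(link)) - ((uint8_t *)&(((type *)NULL)->member))))

typedef struct {
    int value;
    link_t link;
} item_t;

int main(void)
{
    item_t it = { .value = 42 };
    link_t *l = &it.link;       /* all a list ever stores */

    item_t *back = list_get_instance(l, item_t, link);
    printf("%d\n", back->value);    /* prints 42 */
    return 0;
}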
/trunk/kernel/generic/include/adt/btree.h
48,19 → 48,25
/** Number of keys. */
count_t keys;
 
/** Keys. We currently support only single keys. Additional room for one extra key is provided. */
/**
* Keys. We currently support only single keys. Additional room for one
* extra key is provided.
*/
btree_key_t key[BTREE_MAX_KEYS + 1];
 
/**
* Pointers to values. Sorted according to the key array. Defined only in leaf-level.
* There is room for storing value for the extra key.
* Pointers to values. Sorted according to the key array. Defined only at
* the leaf level. There is room for storing a value for the extra key.
*/
void *value[BTREE_MAX_KEYS + 1];
/**
* Pointers to descendants of this node sorted according to the key array.
* Pointers to descendants of this node sorted according to the key
* array.
*
* subtree[0] points to subtree with keys lesser than to key[0].
* subtree[1] points to subtree with keys greater than or equal to key[0] and lesser than key[1].
* subtree[1] points to subtree with keys greater than or equal to
* key[0] and lesser than key[1].
* ...
* There is room for storing a subtree pointer for the extra key.
*/
69,10 → 75,12
/** Pointer to parent node. Root node has NULL parent. */
struct btree_node *parent;
 
/** Link connecting leaf-level nodes. Defined only when this node is a leaf. */
/**
* Link connecting leaf-level nodes. Defined only when this node is a
* leaf.
*/
link_t leaf_link;
 
/** Variables needed by btree_print(). */
/* Variables needed by btree_print(). */
link_t bfs_link;
int depth;
} btree_node_t;
88,12 → 96,15
extern void btree_create(btree_t *t);
extern void btree_destroy(btree_t *t);
 
extern void btree_insert(btree_t *t, btree_key_t key, void *value, btree_node_t *leaf_node);
extern void btree_insert(btree_t *t, btree_key_t key, void *value,
btree_node_t *leaf_node);
extern void btree_remove(btree_t *t, btree_key_t key, btree_node_t *leaf_node);
extern void *btree_search(btree_t *t, btree_key_t key, btree_node_t **leaf_node);
 
extern btree_node_t *btree_leaf_node_left_neighbour(btree_t *t, btree_node_t *node);
extern btree_node_t *btree_leaf_node_right_neighbour(btree_t *t, btree_node_t *node);
extern btree_node_t *btree_leaf_node_left_neighbour(btree_t *t,
btree_node_t *node);
extern btree_node_t *btree_leaf_node_right_neighbour(btree_t *t,
btree_node_t *node);
 
extern void btree_print(btree_t *t);
#endif
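Basic use of the interface; passing NULL as leaf_node to make the tree locate the leaf itself is an assumption:

btree_t t;
btree_node_t *leaf;
int data = 1;

btree_create(&t);
btree_insert(&t, (btree_key_t) 42, &data, NULL);    /* NULL: find leaf */
if (btree_search(&t, (btree_key_t) 42, &leaf) != NULL) {
    /* found; leaf points at the containing leaf node */
    btree_remove(&t, (btree_key_t) 42, leaf);
}
btree_destroy(&t);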
/trunk/kernel/generic/include/adt/fifo.h
106,7 → 106,8
*
*/
#define fifo_push(name, value) \
name.fifo[name.tail = (name.tail + 1) < name.items ? (name.tail + 1) : 0] = (value)
name.fifo[name.tail = \
(name.tail + 1) < name.items ? (name.tail + 1) : 0] = (value)
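fifo_push() advances tail modulo items, then stores at the new slot. A sketch of a buffer shaped the way the macro expects; initializing tail to items - 1, so that the first push wraps to slot 0, is an assumption about the FIFO_INITIALIZE macros not shown in this hunk:

#define MYFIFO_ITEMS 4

static struct {
    int fifo[MYFIFO_ITEMS];
    count_t items;
    index_t tail;
} myfifo = { .items = MYFIFO_ITEMS, .tail = MYFIFO_ITEMS - 1 };

void fifo_example(void)
{
    fifo_push(myfifo, 10);  /* tail wraps to 0; myfifo.fifo[0] == 10 */
    fifo_push(myfifo, 20);  /* tail becomes 1; myfifo.fifo[1] == 20 */
}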
 
/** Allocate memory for dynamic FIFO.
*
/trunk/kernel/generic/include/mm/mm.h
46,20 → 46,20
#define PAGE_EXEC_SHIFT 5
#define PAGE_GLOBAL_SHIFT 6
 
#define PAGE_NOT_CACHEABLE (0 << PAGE_CACHEABLE_SHIFT)
#define PAGE_CACHEABLE (1 << PAGE_CACHEABLE_SHIFT)
 
#define PAGE_PRESENT (0 << PAGE_PRESENT_SHIFT)
#define PAGE_NOT_PRESENT (1 << PAGE_PRESENT_SHIFT)
 
#define PAGE_USER (1 << PAGE_USER_SHIFT)
#define PAGE_KERNEL (0 << PAGE_USER_SHIFT)
 
#define PAGE_READ (1 << PAGE_READ_SHIFT)
#define PAGE_WRITE (1 << PAGE_WRITE_SHIFT)
#define PAGE_EXEC (1 << PAGE_EXEC_SHIFT)
 
#define PAGE_GLOBAL (1 << PAGE_GLOBAL_SHIFT)
 
#endif
 
/trunk/kernel/generic/include/mm/page.h
58,7 → 58,7
extern void page_table_lock(as_t *as, bool lock);
extern void page_table_unlock(as_t *as, bool unlock);
extern void page_mapping_insert(as_t *as, uintptr_t page, uintptr_t frame,
int flags);
extern void page_mapping_remove(as_t *as, uintptr_t page);
extern pte_t *page_mapping_find(as_t *as, uintptr_t page);
extern pte_t *page_table_create(int flags);
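Combining the two headers: the bits from mm.h parameterize the mapping interface here. A sketch with illustrative addresses, given some as_t *as (note PAGE_PRESENT is the zero value of its bit, so OR-ing it in merely documents intent):

int flags = PAGE_PRESENT | PAGE_USER | PAGE_READ | PAGE_WRITE |
    PAGE_CACHEABLE;

page_table_lock(as, true);
page_mapping_insert(as, /* page */ 0x40000000, /* frame */ 0x800000, flags);
page_table_unlock(as, true);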
/trunk/kernel/generic/include/mm/slab.h
56,13 → 56,19
#define SLAB_MAX_BADNESS(cache) ((PAGE_SIZE << (cache)->order) >> 2)
 
/* slab_reclaim constants */
#define SLAB_RECLAIM_ALL 0x1 /**< Reclaim all possible memory, because we are in memory stress */
 
/** Reclaim all possible memory, because we are under memory stress */
#define SLAB_RECLAIM_ALL 0x1
 
/* cache_create flags */
#define SLAB_CACHE_NOMAGAZINE 0x1 /**< Do not use per-cpu cache */
#define SLAB_CACHE_SLINSIDE 0x2 /**< Have control structure inside SLAB */
#define SLAB_CACHE_MAGDEFERRED (0x4 | SLAB_CACHE_NOMAGAZINE) /**< We add magazine cache later, if we have this flag */
 
/** Do not use per-cpu cache */
#define SLAB_CACHE_NOMAGAZINE 0x1
/** Have control structure inside SLAB */
#define SLAB_CACHE_SLINSIDE 0x2
/** We add magazine cache later, if we have this flag */
#define SLAB_CACHE_MAGDEFERRED (0x4 | SLAB_CACHE_NOMAGAZINE)
 
typedef struct {
link_t link;
count_t busy; /**< Count of full slots in magazine */
81,12 → 87,17
char *name;
 
link_t link;
 
/* Configuration */
size_t size; /**< Size of slab position - align_up(sizeof(obj)) */
/** Size of slab position - align_up(sizeof(obj)) */
size_t size;
 
int (*constructor)(void *obj, int kmflag);
int (*destructor)(void *obj);
int flags; /**< Flags changing behaviour of cache */
 
/** Flags changing behaviour of cache */
int flags;
 
/* Computed values */
uint8_t order; /**< Order of frames to be allocated */
int objects; /**< Number of objects that fit in */
95,7 → 106,8
atomic_t allocated_slabs;
atomic_t allocated_objs;
atomic_t cached_objs;
atomic_t magazine_counter; /**< How many magazines in magazines list */
/** How many magazines in magazines list */
atomic_t magazine_counter;
 
/* Slabs */
link_t full_slabs; /**< List of full slabs */
109,12 → 121,9
slab_mag_cache_t *mag_cache;
} slab_cache_t;
 
extern slab_cache_t * slab_cache_create(char *name,
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
int (*destructor)(void *obj),
int flags);
extern slab_cache_t * slab_cache_create(char *name, size_t size, size_t align,
int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
int flags);
extern void slab_cache_destroy(slab_cache_t *cache);
 
extern void * slab_alloc(slab_cache_t *cache, int flags);
121,7 → 130,7
extern void slab_free(slab_cache_t *cache, void *obj);
extern count_t slab_reclaim(int flags);
 
/** Initialize slab subsystem */
/* Slab subsystem initialization. */
extern void slab_cache_init(void);
extern void slab_enable_cpucache(void);
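End-to-end use of the cache interface declared above — a sketch; the zero flag arguments (default alignment and behaviour) are assumptions:

typedef struct {
    int x;
} foo_t;    /* illustrative payload */

static slab_cache_t *foo_cache;

void foo_cache_init(void)
{
    /* No constructor/destructor, default alignment, no flags. */
    foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
        NULL, NULL, 0);
}

void foo_cache_use(void)
{
    foo_t *f = slab_alloc(foo_cache, 0);    /* 0: assumed default kmflags */

    if (f) {
        f->x = 1;
        slab_free(foo_cache, f);
    }
}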
 
/trunk/kernel/generic/include/mm/tlb.h
39,17 → 39,21
#include <arch/types.h>
 
/**
* Number of TLB shootdown messages that can be queued in processor
* tlb_messages queue.
* Number of TLB shootdown messages that can be queued in the processor's
* tlb_messages queue.
*/
#define TLB_MESSAGE_QUEUE_LEN 10
 
/** Type of TLB shootdown message. */
typedef enum {
TLB_INVL_INVALID = 0, /**< Invalid type. */
TLB_INVL_ALL, /**< Invalidate all entries in TLB. */
TLB_INVL_ASID, /**< Invalidate all entries belonging to one address space. */
TLB_INVL_PAGES /**< Invalidate specified page range belonging to one address space. */
/** Invalid type. */
TLB_INVL_INVALID = 0,
/** Invalidate all entries in TLB. */
TLB_INVL_ALL,
/** Invalidate all entries belonging to one address space. */
TLB_INVL_ASID,
/** Invalidate specified page range belonging to one address space. */
TLB_INVL_PAGES
} tlb_invalidate_type_t;
 
/** TLB shootdown message. */
63,13 → 67,14
extern void tlb_init(void);
 
#ifdef CONFIG_SMP
extern void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid, uintptr_t page, count_t count);
extern void tlb_shootdown_start(tlb_invalidate_type_t type, asid_t asid,
uintptr_t page, count_t count);
extern void tlb_shootdown_finalize(void);
extern void tlb_shootdown_ipi_recv(void);
#else
# define tlb_shootdown_start(w, x, y, z)
# define tlb_shootdown_finalize()
# define tlb_shootdown_ipi_recv()
#define tlb_shootdown_start(w, x, y, z)
#define tlb_shootdown_finalize()
#define tlb_shootdown_ipi_recv()
#endif /* CONFIG_SMP */
 
/* Export TLB interface that each architecture must implement. */
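The shootdown pair brackets the local invalidation, e.g. when a mapping is removed; tlb_invalidate_pages() is assumed to be one of the per-architecture hooks the comment above refers to. On non-SMP builds the start/finalize calls compile away to the empty macros:

/* Invalidate `count' pages of one address space on all processors. */
tlb_shootdown_start(TLB_INVL_PAGES, as->asid, page, count);
tlb_invalidate_pages(as->asid, page, count);    /* local TLB */
tlb_shootdown_finalize();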
/trunk/kernel/generic/include/mm/as.h
53,7 → 53,10
#include <adt/btree.h>
#include <lib/elf.h>
 
/** Defined to be true if user address space and kernel address space shadow each other. */
/**
* Defined to be true if user address space and kernel address space shadow each
* other.
*/
#define KERNEL_ADDRESS_SPACE_SHADOWED KERNEL_ADDRESS_SPACE_SHADOWED_ARCH
 
#define KERNEL_ADDRESS_SPACE_START KERNEL_ADDRESS_SPACE_START_ARCH
61,18 → 64,57
#define USER_ADDRESS_SPACE_START USER_ADDRESS_SPACE_START_ARCH
#define USER_ADDRESS_SPACE_END USER_ADDRESS_SPACE_END_ARCH
 
#define USTACK_ADDRESS USTACK_ADDRESS_ARCH
 
#define FLAG_AS_KERNEL (1 << 0) /**< Kernel address space. */
/** Kernel address space. */
#define FLAG_AS_KERNEL (1 << 0)
 
/** Address space area attributes. */
/* Address space area attributes. */
#define AS_AREA_ATTR_NONE 0
#define AS_AREA_ATTR_PARTIAL 1 /**< Not fully initialized area. */
 
#define AS_PF_FAULT 0 /**< The page fault was not resolved by as_page_fault(). */
#define AS_PF_OK 1 /**< The page fault was resolved by as_page_fault(). */
#define AS_PF_DEFER 2 /**< The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */
/** The page fault was not resolved by as_page_fault(). */
#define AS_PF_FAULT 0
/** The page fault was resolved by as_page_fault(). */
#define AS_PF_OK 1
/** The page fault was caused by memcpy_from_uspace() or memcpy_to_uspace(). */
#define AS_PF_DEFER 2
 
/** Address space structure.
*
* as_t contains the list of as_areas of userspace-accessible
* pages for one or more tasks. Ranges of kernel memory pages are not
* supposed to figure in the list as they are shared by all tasks and
* set up during system initialization.
*/
typedef struct as {
/** Protected by asidlock. */
link_t inactive_as_with_asid_link;
 
mutex_t lock;
 
/** Number of references (i.e. tasks that reference this as). */
count_t refcount;
 
/** Number of processors on which this address space is active. */
count_t cpu_refcount;
 
/** B+tree of address space areas. */
btree_t as_area_btree;
/**
* Address space identifier.
* Constant on architectures that do not support ASIDs.
*/
asid_t asid;
/** Non-generic content. */
as_genarch_t genarch;
 
/** Architecture specific content. */
as_arch_t arch;
} as_t;
 
typedef struct {
pte_t *(* page_table_create)(int flags);
void (* page_table_destroy)(pte_t *page_table);
80,11 → 122,19
void (* page_table_unlock)(as_t *as, bool unlock);
} as_operations_t;
 
/** This structure contains information associated with the shared address space area. */
/**
* This structure contains information associated with the shared address space
* area.
*/
typedef struct {
mutex_t lock; /**< This lock must be acquired only when the as_area lock is held. */
count_t refcount; /**< This structure can be deallocated if refcount drops to 0. */
btree_t pagemap; /**< B+tree containing complete map of anonymous pages of the shared area. */
/** This lock must be acquired only when the as_area lock is held. */
mutex_t lock;
/** This structure can be deallocated if refcount drops to 0. */
count_t refcount;
/**
* B+tree containing complete map of anonymous pages of the shared area.
*/
btree_t pagemap;
} share_info_t;
 
/** Page fault access type. */
115,16 → 165,28
*/
typedef struct {
mutex_t lock;
as_t *as; /**< Containing address space. */
int flags; /**< Flags related to the memory represented by the address space area. */
int attributes; /**< Attributes related to the address space area itself. */
count_t pages; /**< Size of this area in multiples of PAGE_SIZE. */
uintptr_t base; /**< Base address of this area. */
btree_t used_space; /**< Map of used space. */
share_info_t *sh_info; /**< If the address space area has been shared, this pointer will
reference the share info structure. */
struct mem_backend *backend; /**< Memory backend backing this address space area. */
/** Containing address space. */
as_t *as;
/** Flags related to the memory represented by the address space area. */
int flags;
/** Attributes related to the address space area itself. */
int attributes;
/** Size of this area in multiples of PAGE_SIZE. */
count_t pages;
/** Base address of this area. */
uintptr_t base;
/** Map of used space. */
btree_t used_space;
 
/**
* If the address space area has been shared, this pointer will reference
* the share info structure.
*/
share_info_t *sh_info;
 
/** Memory backend backing this address space area. */
struct mem_backend *backend;
 
/** Data to be used by the backend. */
mem_backend_data_t backend_data;
} as_area_t;
146,15 → 208,16
 
extern as_t *as_create(int flags);
extern void as_destroy(as_t *as);
extern void as_switch(as_t *old, as_t *replace);
extern void as_switch(as_t *old_as, as_t *new_as);
extern int as_page_fault(uintptr_t page, pf_access_t access, istate_t *istate);
 
extern as_area_t *as_area_create(as_t *as, int flags, size_t size, uintptr_t base, int attrs,
mem_backend_t *backend, mem_backend_data_t *backend_data);
extern as_area_t *as_area_create(as_t *as, int flags, size_t size,
uintptr_t base, int attrs, mem_backend_t *backend,
mem_backend_data_t *backend_data);
extern int as_area_destroy(as_t *as, uintptr_t address);
extern int as_area_resize(as_t *as, uintptr_t address, size_t size, int flags);
int as_area_share(as_t *src_as, uintptr_t src_base, size_t acc_size,
as_t *dst_as, uintptr_t dst_base, int dst_flags_mask);
 
extern int as_area_get_flags(as_area_t *area);
extern bool as_area_check_access(as_area_t *area, pf_access_t access);
/trunk/kernel/generic/include/mm/buddy.h
44,11 → 44,15
 
/** Buddy system operations to be implemented by each implementation. */
typedef struct {
/** Return pointer to left-side or right-side buddy for block passed as
* argument. */
/**
* Return pointer to left-side or right-side buddy for block passed as
* argument.
*/
link_t *(* find_buddy)(struct buddy_system *, link_t *);
/** Bisect the block passed as argument and return pointer to the new
* right-side buddy. */
/**
* Bisect the block passed as argument and return pointer to the new
* right-side buddy.
*/
link_t *(* bisect)(struct buddy_system *, link_t *);
/** Coalesce two buddies into a bigger block. */
link_t *(* coalesce)(struct buddy_system *, link_t *, link_t *);
75,7 → 79,7
} buddy_system_t;
 
extern void buddy_system_create(buddy_system_t *b, uint8_t max_order,
buddy_system_operations_t *op, void *data);
extern link_t *buddy_system_alloc(buddy_system_t *b, uint8_t i);
extern bool buddy_system_can_alloc(buddy_system_t *b, uint8_t order);
extern void buddy_system_free(buddy_system_t *b, link_t *block);
/trunk/kernel/generic/include/ipc/sysipc.h
40,20 → 40,22
#include <arch/types.h>
 
unative_t sys_ipc_call_sync_fast(unative_t phoneid, unative_t method,
unative_t arg1, ipc_data_t *data);
unative_t sys_ipc_call_sync(unative_t phoneid, ipc_data_t *question,
ipc_data_t *reply);
unative_t sys_ipc_call_async_fast(unative_t phoneid, unative_t method,
unative_t arg1, unative_t arg2);
unative_t sys_ipc_call_async(unative_t phoneid, ipc_data_t *data);
unative_t sys_ipc_answer_fast(unative_t callid, unative_t retval,
unative_t arg1, unative_t arg2);
unative_t sys_ipc_answer(unative_t callid, ipc_data_t *data);
unative_t sys_ipc_wait_for_call(ipc_data_t *calldata, uint32_t usec, int nonblocking);
unative_t sys_ipc_wait_for_call(ipc_data_t *calldata, uint32_t usec,
int nonblocking);
unative_t sys_ipc_forward_fast(unative_t callid, unative_t phoneid,
unative_t method, unative_t arg1);
unative_t sys_ipc_hangup(int phoneid);
unative_t sys_ipc_register_irq(inr_t inr, devno_t devno, unative_t method, irq_code_t *ucode);
unative_t sys_ipc_register_irq(inr_t inr, devno_t devno, unative_t method,
irq_code_t *ucode);
unative_t sys_ipc_unregister_irq(inr_t inr, devno_t devno);
 
#endif
/trunk/kernel/generic/include/ipc/ipc.h
47,40 → 47,49
#endif
 
/* Flags for calls */
#define IPC_CALL_ANSWERED (1<<0) /**< This is answer to a call */
#define IPC_CALL_STATIC_ALLOC (1<<1) /**< This call will not be freed on error */
#define IPC_CALL_DISCARD_ANSWER (1<<2) /**< Answer will not be passed to userspace, will be discarded */
#define IPC_CALL_FORWARDED (1<<3) /**< Call was forwarded */
#define IPC_CALL_CONN_ME_TO (1<<4) /**< Identify connect_me_to answer */
#define IPC_CALL_NOTIF (1<<5) /**< Interrupt notification */
 
/** This is answer to a call */
#define IPC_CALL_ANSWERED (1 << 0)
/** This call will not be freed on error */
#define IPC_CALL_STATIC_ALLOC (1 << 1)
/** Answer will not be passed to userspace, will be discarded */
#define IPC_CALL_DISCARD_ANSWER (1 << 2)
/** Call was forwarded */
#define IPC_CALL_FORWARDED (1 << 3)
/** Identify connect_me_to answer */
#define IPC_CALL_CONN_ME_TO (1 << 4)
/** Interrupt notification */
#define IPC_CALL_NOTIF (1 << 5)
 
/* Flags of callid (the addresses are aligned at least to 4,
* which is why we can use the bottom 2 bits of the call address).
*/
#define IPC_CALLID_ANSWERED 1 /**< Type of this msg is 'answer' */
#define IPC_CALLID_NOTIFICATION 2 /**< Type of this msg is 'notification' */
/** Type of this msg is 'answer' */
#define IPC_CALLID_ANSWERED 1
/** Type of this msg is 'notification' */
#define IPC_CALLID_NOTIFICATION 2
 
/* Return values from IPC_ASYNC */
#define IPC_CALLRET_FATAL -1
#define IPC_CALLRET_TEMPORARY -2
 
 
/* Macros for manipulating calling data */
#define IPC_SET_RETVAL(data, retval) ((data).args[0] = (retval))
#define IPC_SET_METHOD(data, val) ((data).args[0] = (val))
#define IPC_SET_ARG1(data, val) ((data).args[1] = (val))
#define IPC_SET_ARG2(data, val) ((data).args[2] = (val))
#define IPC_SET_ARG3(data, val) ((data).args[3] = (val))
 
#define IPC_GET_METHOD(data) ((data).args[0])
#define IPC_GET_RETVAL(data) ((data).args[0])
 
#define IPC_GET_ARG1(data) ((data).args[1])
#define IPC_GET_ARG2(data) ((data).args[2])
#define IPC_GET_ARG3(data) ((data).args[3])
 
/* Well known phone descriptors */
#define PHONE_NS 0
 
/* System-specific methods - only through special syscalls
* These methods have special behaviour
166,11 → 175,16
struct task;
 
typedef enum {
IPC_PHONE_FREE = 0, /**< Phone is free and can be allocated */
IPC_PHONE_CONNECTING, /**< Phone is connecting somewhere */
IPC_PHONE_CONNECTED, /**< Phone is connected */
IPC_PHONE_HUNGUP, /**< Phone is hung up, waiting for answers to come */
IPC_PHONE_SLAMMED /**< Phone was hung up from the server */
/** Phone is free and can be allocated */
IPC_PHONE_FREE = 0,
/** Phone is connecting somewhere */
IPC_PHONE_CONNECTING,
/** Phone is connected */
IPC_PHONE_CONNECTED,
/** Phone is hung up, waiting for answers to come */
IPC_PHONE_HUNGUP,
/** Phone was hung up from the server */
IPC_PHONE_SLAMMED
} ipc_phone_state_t;
 
/** Structure identifying phone (in TASK structure) */
189,15 → 203,20
 
waitq_t wq;
 
link_t connected_phones; /**< Phones connected to this answerbox */
link_t calls; /**< Received calls */
/** Phones connected to this answerbox */
link_t connected_phones;
/** Received calls */
link_t calls;
link_t dispatched_calls; /* Should be a hash table in the future */
 
link_t answers; /**< Answered calls */
/** Answered calls */
link_t answers;
 
SPINLOCK_DECLARE(irq_lock);
link_t irq_notifs; /**< Notifications from IRQ handlers */
link_t irq_head; /**< IRQs with notifications to this answerbox. */
/** Notifications from IRQ handlers */
link_t irq_notifs;
/** IRQs with notifications to this answerbox. */
link_t irq_head;
} answerbox_t;
 
typedef struct {
217,9 → 236,11
*/
answerbox_t *callerbox;
 
unative_t priv; /**< Private data to internal IPC */
/** Private data to internal IPC */
unative_t priv;
 
ipc_data_t data; /**< Data passed from/to userspace */
/** Data passed from/to userspace */
ipc_data_t data;
} call_t;
 
extern void ipc_init(void);
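Note that IPC_SET_METHOD() and IPC_SET_RETVAL() share args[0]: the slot carries the method on the request path and the return value on the answer path. A sketch of filling and reading a call's payload through these macros:

void answer_sketch(call_t *call)
{
    /* Fill an answer: return value plus two payload words. */
    IPC_SET_RETVAL(call->data, 0);
    IPC_SET_ARG1(call->data, 0x10);
    IPC_SET_ARG2(call->data, 0x20);
}

void read_sketch(call_t *call)
{
    unative_t rc = IPC_GET_RETVAL(call->data);
    unative_t a1 = IPC_GET_ARG1(call->data);
    /* ... use rc and a1 ... */
}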
/trunk/kernel/generic/include/ipc/irq.h
43,8 → 43,8
#include <arch/types.h>
#include <adt/list.h>
 
extern int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno, unative_t method,
irq_code_t *ucode);
extern int ipc_irq_register(answerbox_t *box, inr_t inr, devno_t devno,
unative_t method, irq_code_t *ucode);
extern void ipc_irq_send_notif(irq_t *irq);
extern void ipc_irq_send_msg(irq_t *irq, unative_t a1, unative_t a2, unative_t a3);
extern void ipc_irq_unregister(answerbox_t *box, inr_t inr, devno_t devno);
/trunk/kernel/generic/src/mm/as.c
168,7 → 168,7
as->refcount = 0;
as->cpu_refcount = 0;
#ifdef AS_PAGE_TABLE
as->page_table = page_table_create(flags);
as->genarch.page_table = page_table_create(flags);
#else
page_table_create(flags);
#endif
220,7 → 220,7
 
btree_destroy(&as->as_area_btree);
#ifdef AS_PAGE_TABLE
page_table_destroy(as->page_table);
page_table_destroy(as->genarch.page_table);
#else
page_table_destroy(NULL);
#endif
863,7 → 863,7
* @param old_as Old address space or NULL.
* @param new_as New address space.
*/
void as_switch(as_t *old, as_t *replace)
void as_switch(as_t *old_as, as_t *new_as)
{
ipl_t ipl;
bool needs_asid = false;
874,10 → 874,10
/*
* First, take care of the old address space.
*/
if (old) {
mutex_lock_active(&old->lock);
ASSERT(old->cpu_refcount);
if((--old->cpu_refcount == 0) && (old != AS_KERNEL)) {
if (old_as) {
mutex_lock_active(&old_as->lock);
ASSERT(old_as->cpu_refcount);
if ((--old_as->cpu_refcount == 0) && (old_as != AS_KERNEL)) {
/*
* The old address space is no longer active on
* any processor. It can be appended to the
884,35 → 884,37
* list of inactive address spaces with assigned
* ASID.
*/
ASSERT(old->asid != ASID_INVALID);
list_append(&old->inactive_as_with_asid_link,
ASSERT(old_as->asid != ASID_INVALID);
list_append(&old_as->inactive_as_with_asid_link,
&inactive_as_with_asid_head);
}
mutex_unlock(&old->lock);
mutex_unlock(&old_as->lock);
 
/*
* Perform architecture-specific tasks when the address space
* is being removed from the CPU.
*/
as_deinstall_arch(old);
as_deinstall_arch(old_as);
}
 
/*
* Second, prepare the new address space.
*/
mutex_lock_active(&replace->lock);
if ((replace->cpu_refcount++ == 0) && (replace != AS_KERNEL)) {
if (replace->asid != ASID_INVALID) {
list_remove(&replace->inactive_as_with_asid_link);
mutex_lock_active(&new_as->lock);
if ((new_as->cpu_refcount++ == 0) && (new_as != AS_KERNEL)) {
if (new_as->asid != ASID_INVALID) {
list_remove(&new_as->inactive_as_with_asid_link);
} else {
/*
* Defer call to asid_get() until replace->lock is released.
* Defer call to asid_get() until new_as->lock is released.
*/
needs_asid = true;
}
}
SET_PTL0_ADDRESS(replace->page_table);
mutex_unlock(&replace->lock);
#ifdef AS_PAGE_TABLE
SET_PTL0_ADDRESS(new_as->genarch.page_table);
#endif
mutex_unlock(&new_as->lock);
 
if (needs_asid) {
/*
922,9 → 924,9
asid_t asid;
asid = asid_get();
mutex_lock_active(&replace->lock);
replace->asid = asid;
mutex_unlock(&replace->lock);
mutex_lock_active(&new_as->lock);
new_as->asid = asid;
mutex_unlock(&new_as->lock);
}
spinlock_unlock(&inactive_as_with_asid_lock);
interrupts_restore(ipl);
933,9 → 935,9
* Perform architecture-specific steps.
* (e.g. write ASID to hardware register etc.)
*/
as_install_arch(replace);
as_install_arch(new_as);
AS = replace;
AS = new_as;
}
 
/** Convert address space area flags to page flags.
/trunk/kernel/arch/sparc64/include/mm/tsb.h
54,7 → 54,6
#include <arch/mm/tte.h>
#include <arch/mm/mmu.h>
#include <arch/types.h>
#include <mm/as.h>
 
/** TSB Base register. */
typedef union tsb_base_reg {
108,10 → 107,14
asi_u64_write(ASI_DMMU, VA_DMMU_TSB_BASE, v);
}
 
extern void tsb_invalidate(as_t *as, uintptr_t page, count_t pages);
extern void itsb_pte_copy(pte_t *t);
extern void dtsb_pte_copy(pte_t *t, bool ro);
/* Forward declarations. */
struct as;
struct pte;
 
extern void tsb_invalidate(struct as *as, uintptr_t page, count_t pages);
extern void itsb_pte_copy(struct pte *t);
extern void dtsb_pte_copy(struct pte *t, bool ro);
 
#endif /* !def __ASM__ */
 
#endif
/trunk/kernel/arch/ia32xen/src/mm/page.c
53,9 → 53,9
{
if (config.cpu_active == 1) {
page_mapping_operations = &pt_mapping_operations;
AS_KERNEL->page_table = (pte_t *) KA2PA(start_info.ptl0);
AS_KERNEL->genarch.page_table = (pte_t *) KA2PA(start_info.ptl0);
} else
SET_PTL0_ADDRESS_ARCH(AS_KERNEL->page_table);
SET_PTL0_ADDRESS_ARCH(AS_KERNEL->genarch.page_table);
}
 
void page_fault(int n, istate_t *istate)
/trunk/kernel/arch/amd64/src/pm.c
201,7 → 201,7
/* We are going to use malloc, which may return
* a non boot-mapped pointer; initialize the CR3 register
* ahead of page_init */
write_cr3((uintptr_t) AS_KERNEL->page_table);
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
 
tss_p = (struct tss *) malloc(sizeof(tss_t), FRAME_ATOMIC);
if (!tss_p)
/trunk/kernel/arch/amd64/src/mm/page.c
111,10 → 111,10
}
 
exc_register(14, "page_fault", (iroutine) page_fault);
write_cr3((uintptr_t) AS_KERNEL->page_table);
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
else {
write_cr3((uintptr_t) AS_KERNEL->page_table);
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
}
 
/trunk/kernel/arch/ia32/src/mm/page.c
68,10 → 68,10
}
 
exc_register(14, "page_fault", (iroutine) page_fault);
write_cr3((uintptr_t) AS_KERNEL->page_table);
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
else {
write_cr3((uintptr_t) AS_KERNEL->page_table);
write_cr3((uintptr_t) AS_KERNEL->genarch.page_table);
}
 
paging_on();