Subversion Repositories HelenOS

Compare Revisions

Ignore whitespace Rev 106 → Rev 107

/SPARTAN/trunk/src/time/delay.c
32,14 → 32,22
#include <arch/asm.h>
#include <arch.h>
 
/*
 * Note that the delay loop is calibrated for each and every CPU in the system.
 * Therefore it is necessary to call cpu_priority_high() before calling the asm_delay_loop().
 */
/** Active delay
*
* Delay the execution for the given number
* of microseconds (or slightly more). The delay
* is implemented as CPU calibrated active loop.
*
* @param microseconds Number of usec to sleep.
*
*/
void delay(__u32 microseconds)
{
pri_t pri;
 
/* The delay loop is calibrated for each and every
CPU in the system. Therefore it is necessary to
cpu_priority_high() before calling the asm_delay_loop(). */
pri = cpu_priority_high();
asm_delay_loop(microseconds * CPU->delay_loop_const);
cpu_priority_restore(pri);
/SPARTAN/trunk/src/time/timeout.c
38,6 → 38,12
#include <arch/asm.h>
#include <arch.h>
 
 
/** Initialize timeouts
*
* Initialize kernel timeouts.
*
*/
void timeout_init(void)
{
spinlock_initialize(&CPU->timeoutlock);
45,6 → 51,13
}
 
 
/** Initialize empty timeout list
*
* Initialize the timeout list to be empty.
*
* @param t Timeout list to be initialized.
*
*/
void timeout_reinitialize(timeout_t *t)
{
t->cpu = NULL;
54,6 → 67,14
link_initialize(&t->link);
}
 
 
/** Initialize timeout list
*
* Initialize the timeout list and its spinlock.
*
* @param t Timeout list to be initialized.
*
*/
void timeout_initialize(timeout_t *t)
{
spinlock_initialize(&t->lock);
60,8 → 81,19
timeout_reinitialize(t);
}
 
/*
 * This function registers f for execution in about time microseconds.
 */
 
/** Register timeout callback
*
* Insert the timeout handler f (with argument arg)
* to the timeout list and make it execute in
* time microseconds (or slightly more).
*
* @param t Timeout list.
* @param time Number of usec in the future to execute
* the handler.
* @param f Timeout handler function.
* @param arg Timeout handler argument.
*
*/
void timeout_register(timeout_t *t, __u64 time, timeout_handler f, void *arg)
{
121,6 → 153,14
cpu_priority_restore(pri);
}
 
 
/** Unregister timeout callback
*
* Remove timeout from timeout list.
*
* @param t Timeout to unregister.
*
*/
int timeout_unregister(timeout_t *t)
{
timeout_t *hlp;
/SPARTAN/trunk/src/time/clock.c
43,8 → 43,12
#include <arch/smp/atomic.h>
#endif
 
/*
 * Clock is called from an interrupt and is cpu_priority_high()'d.
 */
/** Clock routine
*
* Clock routine executed from clock interrupt handler
* (assuming cpu_priority_high()). Runs expired timeouts
* and preemptive scheduling.
*
*/
void clock(void)
{
/SPARTAN/trunk/src/smp/ipi.c
31,6 → 31,14
#include <smp/ipi.h>
#include <config.h>
 
 
/** Broadcast IPI message
*
* Broadcast IPI message to all CPUs.
*
* @param ipi Message to broadcast.
*
*/
void ipi_broadcast(int ipi)
{
/*
/SPARTAN/trunk/src/proc/scheduler.c
56,6 → 56,13
spinlock_t nrdylock;
volatile int nrdy;
 
 
/** Initialize context switching
*
* Initialize context switching and lazy FPU
* context switching.
*
*/
void before_thread_runs(void)
{
before_thread_runs_arch();
63,12 → 70,26
}
 
 
/** Initialize scheduler
*
* Initialize kernel scheduler.
*
*/
void scheduler_init(void)
{
spinlock_initialize(&nrdylock);
}
 
/* cpu_priority_high()'d */
 
/** Get thread to be scheduled
*
* Get the optimal thread to be scheduled
* according to thread accounting and scheduler
* policy.
*
* @return Thread to be scheduled.
*
*/
struct thread *find_best_thread(void)
{
thread_t *t;
155,11 → 176,17
 
}
 
/*
 * This function prevents low priority threads from starving in rq's.
 * When it decides to relink rq's, it reconnects respective pointers
 * so that in result threads with 'pri' greater or equal 'start' are
 * moved to a higher-priority queue.
 */
 
/** Prevent rq starvation
*
* Prevent low priority threads from starving in rq's.
*
* When the function decides to relink rq's, it reconnects
* respective pointers so that in result threads with 'pri'
* greater or equal 'start' are moved to a higher-priority queue.
*
* @param start Threshold priority.
*
*/
void relink_rq(int start)
{
192,8 → 219,11
 
}
 
/*
 * The scheduler.
 */
 
/** The scheduler
*
* The thread scheduling procedure.
*
*/
void scheduler(void)
{
237,6 → 267,14
/* not reached */
}
 
 
/** Scheduler stack switch wrapper
*
* Second part of the scheduler() function
* using new stack. Handling the actual context
* switch to a new thread.
*
*/
void scheduler_separated_stack(void)
{
int priority;
365,10 → 403,15
/* not reached */
}
 
 
#ifdef __SMP__
/*
 * This is the load balancing thread.
 * It supervises thread supplies for the CPU it's wired to.
 */
/** Load balancing thread
*
* SMP load balancing thread, supervising thread supplies
* for the CPU it's wired to.
*
* @param arg Generic thread argument (unused).
*
*/
void kcpulb(void *arg)
{
/SPARTAN/trunk/src/proc/task.c
39,6 → 39,12
spinlock_t tasks_lock;
link_t tasks_head;
 
 
/** Initialize tasks
*
* Initialize kernel tasks support.
*
*/
void task_init(void)
{
TASK = NULL;
46,6 → 52,16
list_initialize(&tasks_head);
}
 
 
/** Create new task
*
* Create new task with no threads.
*
* @param m Task's virtual memory structure.
*
* @return New task's structure on success, NULL on failure.
*
*/
task_t *task_create(vm_t *m)
{
pri_t pri;
/SPARTAN/trunk/src/proc/thread.c
50,7 → 50,7
#include <smp/ipi.h>
#include <arch/faddr.h>
 
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */
 
spinlock_t threads_lock;
link_t threads_head;
58,12 → 58,15
static spinlock_t tidlock;
__u32 last_tid = 0;
 
/*
 * cushion() is provided to ensure that every thread
 * makes a call to thread_exit() when its implementing function returns.
 */
 
/** Thread wrapper
*
* This wrapper is provided to ensure that every thread
* makes a call to thread_exit() when its implementing
* function returns.
*
* cpu_priority_high()'d
* cpu_priority_high() is assumed.
*
*/
void cushion(void)
{
81,6 → 84,12
/* not reached */
}
 
 
/** Initialize threads
*
* Initialize kernel threads support.
*
*/
void thread_init(void)
{
THREAD = NULL;
89,6 → 98,14
list_initialize(&threads_head);
}
 
 
/** Make thread ready
*
* Switch thread t to the ready state.
*
* @param t Thread to make ready.
*
*/
void thread_ready(thread_t *t)
{
cpu_t *cpu;
108,7 → 125,7
}
spinlock_unlock(&t->lock);
/*
/*
* Append t to respective ready queue on respective processor.
*/
r = &cpu->rq[i];
133,6 → 150,19
cpu_priority_restore(pri);
}
 
 
/** Create new thread
*
* Create a new thread.
*
* @param func Thread's implementing function.
* @param arg Thread's implementing function argument.
* @param task Task to which the thread belongs.
* @param flags Thread flags.
*
* @return New thread's structure on success, NULL on failure.
*
*/
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
thread_t *t;
213,6 → 243,13
return t;
}
 
 
/** Make thread exiting
*
* End current thread execution and switch it to the exiting
* state. All pending timeouts are executed.
*
*/
void thread_exit(void)
{
pri_t pri;
230,14 → 267,27
scheduler();
}
 
 
/** Thread sleep
*
* Suspend execution of the current thread.
*
* @param sec Number of seconds to sleep.
*
*/
void thread_sleep(__u32 sec)
{
	/* Express the requested delay in microseconds and delegate
	 * to thread_usleep(), which implements the actual sleep.
	 * NOTE(review): sec * 1000000 wraps for sec > ~4294 with a
	 * 32-bit __u32 — confirm callers stay below that bound. */
	__u32 usec = sec * 1000000;

	thread_usleep(usec);
}
/*
* Suspend execution of current thread for usec microseconds.
*/
 
 
/** Thread usleep
*
* Suspend execution of the current thread.
*
* @param usec Number of microseconds to sleep.
*
*/
void thread_usleep(__u32 usec)
{
waitq_t wq;
247,6 → 297,16
(void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}
 
 
/** Register thread out-of-context invocation
*
* Register a function and its argument to be executed
* on next context switch to the current thread.
*
* @param call_me Out-of-context function.
* @param call_me_with Out-of-context function argument.
*
*/
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
pri_t pri;