Subversion Repositories HelenOS-historic

Compare Revisions

Ignore whitespace Rev 99 → Rev 107

/SPARTAN/trunk/src/proc/scheduler.c
56,6 → 56,13
spinlock_t nrdylock;
volatile int nrdy;
 
 
/** Initialize context switching
*
* Initialize context switching and lazy FPU
* context switching.
*
*/
void before_thread_runs(void)
{
before_thread_runs_arch();
63,12 → 70,26
}
 
 
/** Initialize scheduler
 *
 * Initialize kernel scheduler.
 *
 * Currently this only initializes the spinlock guarding
 * the global ready-thread counter (nrdy); per-CPU run
 * queues are set up elsewhere.
 *
 */
void scheduler_init(void)
{
spinlock_initialize(&nrdylock);
}
 
/* cpu_priority_high() is assumed. */
 
/** Get thread to be scheduled
*
* Get the optimal thread to be scheduled
* according thread accounting and scheduler
* policy.
*
* @return Thread to be scheduled.
*
*/
struct thread *find_best_thread(void)
{
thread_t *t;
155,11 → 176,17
 
}
 
/*
* This function prevents low priority threads from starving in rq's.
* When it decides to relink rq's, it reconnects respective pointers
* so that in result threads with 'pri' greater or equal 'start' are
* moved to a higher-priority queue.
 
/** Prevent rq starvation
*
* Prevent low priority threads from starving in rq's.
*
* When the function decides to relink rq's, it reconnects
* respective pointers so that in result threads with 'pri'
* greater or equal 'start' are moved to a higher-priority queue.
*
* @param start Threshold priority.
*
*/
void relink_rq(int start)
{
192,8 → 219,11
 
}
 
/*
* The scheduler.
 
/** The scheduler
*
* The thread scheduling procedure.
*
*/
void scheduler(void)
{
237,6 → 267,14
/* not reached */
}
 
 
/** Scheduler stack switch wrapper
*
* Second part of the scheduler() function
* using new stack. Handling the actual context
* switch to a new thread.
*
*/
void scheduler_separated_stack(void)
{
int priority;
365,10 → 403,15
/* not reached */
}
 
 
#ifdef __SMP__
/*
* This is the load balancing thread.
* It supervises thread supplies for the CPU it's wired to.
/** Load balancing thread
*
* SMP load balancing thread, supervising thread supplies
* for the CPU it's wired to.
*
* @param arg Generic thread argument (unused).
*
*/
void kcpulb(void *arg)
{
/SPARTAN/trunk/src/proc/task.c
39,6 → 39,12
spinlock_t tasks_lock;
link_t tasks_head;
 
 
/** Initialize tasks
*
* Initialize kernel tasks support.
*
*/
void task_init(void)
{
TASK = NULL;
46,6 → 52,16
list_initialize(&tasks_head);
}
 
 
/** Create new task
*
* Create new task with no threads.
*
* @param m Task's virtual memory structure.
*
* @return New task's structure on success, NULL on failure.
*
*/
task_t *task_create(vm_t *m)
{
pri_t pri;
/SPARTAN/trunk/src/proc/thread.c
50,7 → 50,7
#include <smp/ipi.h>
#include <arch/faddr.h>
 
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"};
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */
 
spinlock_t threads_lock;
link_t threads_head;
58,12 → 58,15
static spinlock_t tidlock;
__u32 last_tid = 0;
 
/*
* cushion() is provided to ensure that every thread
 
/** Thread wrapper
*
* This wrapper is provided to ensure that every thread
* makes a call to thread_exit() when its implementing
* function returns.
*
* cpu_priority_high()'d
* cpu_priority_high() is assumed.
*
*/
void cushion(void)
{
81,6 → 84,12
/* not reached */
}
 
 
/** Initialize threads
*
* Initialize kernel threads support.
*
*/
void thread_init(void)
{
THREAD = NULL;
89,6 → 98,14
list_initialize(&threads_head);
}
 
 
/** Make thread ready
*
* Switch thread t to the ready state.
*
* @param t Thread to make ready.
*
*/
void thread_ready(thread_t *t)
{
cpu_t *cpu;
108,7 → 125,7
}
spinlock_unlock(&t->lock);
/*
/*
* Append t to respective ready queue on respective processor.
*/
r = &cpu->rq[i];
133,6 → 150,19
cpu_priority_restore(pri);
}
 
 
/** Create new thread
*
* Create a new thread.
*
* @param func Thread's implementing function.
* @param arg Thread's implementing function argument.
* @param task Task to which the thread belongs.
* @param flags Thread flags.
*
* @return New thread's structure on success, NULL on failure.
*
*/
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
thread_t *t;
213,6 → 243,13
return t;
}
 
 
/** Make thread exiting
*
* End current thread execution and switch it to the exiting
* state. All pending timeouts are executed.
*
*/
void thread_exit(void)
{
pri_t pri;
230,14 → 267,27
scheduler();
}
 
 
/** Thread sleep
 *
 * Suspend execution of the current thread for the
 * requested number of seconds. Thin wrapper that
 * delegates to thread_usleep().
 *
 * NOTE(review): sec > 4294 overflows the __u32
 * microsecond value — same as original behavior.
 *
 * @param sec Number of seconds to sleep.
 *
 */
void thread_sleep(__u32 sec)
{
	__u32 usec = sec * 1000000;

	thread_usleep(usec);
}
/*
* Suspend execution of current thread for usec microseconds.
*/
 
 
/** Thread usleep
*
* Suspend execution of the current thread.
*
* @param usec Number of microseconds to sleep.
*
*/
void thread_usleep(__u32 usec)
{
waitq_t wq;
247,6 → 297,16
(void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING);
}
 
 
/** Register thread out-of-context invocation
*
* Register a function and its argument to be executed
* on next context switch to the current thread.
*
* @param call_me Out-of-context function.
* @param call_me_with Out-of-context function argument.
*
*/
void thread_register_call_me(void (* call_me)(void *), void *call_me_with)
{
pri_t pri;