/trunk/kernel/generic/include/proc/thread.h |
---|
52,7 → 52,7 |
#define THREAD_STACK_SIZE STACK_SIZE |
/**< Thread states. */ |
/** Thread states. */ |
enum state { |
Invalid, /**< It is an error, if thread is found in this state. */ |
Running, /**< State of a thread that is currently executing on some CPU. */ |
65,7 → 65,7 |
extern char *thread_states[]; |
/**< Join types. */ |
/** Join types. */ |
typedef enum { |
None, |
TaskClnp, /**< The thread will be joined by ktaskclnp thread. */ |
72,8 → 72,10 |
TaskGC /**< The thread will be joined by ktaskgc thread. */ |
} thread_join_type_t; |
#define X_WIRED (1<<0) |
#define X_STOLEN (1<<1) |
/* Thread flags */ |
#define THREAD_FLAG_WIRED (1<<0) /**< Thread cannot be migrated to another CPU. */ |
#define THREAD_FLAG_STOLEN (1<<1) /**< Thread was migrated to another CPU and has not run yet. */ |
#define THREAD_FLAG_USPACE (1<<2) /**< Thread executes in userspace. */ |
#define THREAD_NAME_BUFLEN 20 |
127,7 → 129,7 |
/* |
* Defined only if thread doesn't run. |
 * It means that the FPU context is in the CPU that most recently executed this thread. |
* This disables migration |
* This disables migration. |
*/ |
int fpu_context_engaged; |
149,7 → 151,7 |
thread_arch_t arch; /**< Architecture-specific data. */ |
uint8_t *kstack; /**< Thread's kernel stack. */ |
uint8_t *kstack; /**< Thread's kernel stack. */ |
}; |
/** Thread list lock. |
170,6 → 172,12 |
#ifndef thread_create_arch |
extern void thread_create_arch(thread_t *t); |
#endif |
#ifndef thr_constructor_arch |
extern void thr_constructor_arch(thread_t *t); |
#endif |
#ifndef thr_destructor_arch |
extern void thr_destructor_arch(thread_t *t); |
#endif |
extern void thread_sleep(uint32_t sec); |
extern void thread_usleep(uint32_t usec); |
/trunk/kernel/generic/include/mm/frame.h |
---|
27,14 → 27,14 |
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
*/ |
/** @addtogroup genericmm |
/** @addtogroup genericmm |
* @{ |
*/ |
/** @file |
*/ |
#ifndef __FRAME_H__ |
#define __FRAME_H__ |
#ifndef KERN_FRAME_H_ |
#define KERN_FRAME_H_ |
#include <arch/types.h> |
#include <typedefs.h> |
61,10 → 61,6 |
#define FRAME_ATOMIC 0x2 /* do not panic and do not sleep on failure */ |
#define FRAME_NO_RECLAIM 0x4 /* do not start reclaiming when no free memory */ |
#define FRAME_OK 0 /* frame_alloc return status */ |
#define FRAME_NO_MEMORY 1 /* frame_alloc return status */ |
#define FRAME_ERROR 2 /* frame_alloc return status */ |
static inline uintptr_t PFN2ADDR(pfn_t frame) |
{ |
return (uintptr_t)(frame << FRAME_WIDTH); |
88,7 → 84,7 |
#define IS_BUDDY_LEFT_BLOCK_ABS(zone, frame) (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0) |
#define IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame) (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1) |
#define frame_alloc(order, flags) frame_alloc_generic(order, flags, NULL) |
#define frame_alloc(order, flags) frame_alloc_generic(order, flags, NULL) |
extern void frame_init(void); |
extern void * frame_alloc_generic(uint8_t order, int flags, int *pzone); |
111,6 → 107,5 |
#endif |
/** @} |
/** @} |
*/ |
/trunk/kernel/generic/src/main/kinit.c |
---|
100,9 → 100,8 |
* not mess together with kcpulb threads. |
* Just a beautification. |
*/ |
if ((t = thread_create(kmp, NULL, TASK, 0, "kmp"))) { |
if ((t = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp"))) { |
spinlock_lock(&t->lock); |
t->flags |= X_WIRED; |
t->cpu = &cpus[0]; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
126,9 → 125,8 |
*/ |
for (i = 0; i < config.cpu_count; i++) { |
if ((t = thread_create(kcpulb, NULL, TASK, 0, "kcpulb"))) { |
if ((t = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb"))) { |
spinlock_lock(&t->lock); |
t->flags |= X_WIRED; |
t->cpu = &cpus[i]; |
spinlock_unlock(&t->lock); |
thread_ready(t); |
/trunk/kernel/generic/src/proc/scheduler.c |
---|
141,8 → 141,7 |
/* Might sleep */ |
spinlock_unlock(&THREAD->lock); |
spinlock_unlock(&CPU->lock); |
THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, |
0); |
THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0); |
/* We may have switched CPUs during slab_alloc */ |
goto restart; |
} |
235,9 → 234,10 |
t->priority = i; /* correct rq index */ |
/* |
* Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge. |
* Clear the THREAD_FLAG_STOLEN flag so that t can be migrated |
* when load balancing needs emerge. |
*/ |
t->flags &= ~X_STOLEN; |
t->flags &= ~THREAD_FLAG_STOLEN; |
spinlock_unlock(&t->lock); |
return t; |
349,7 → 349,8 |
* scheduler_separated_stack(). |
*/ |
context_save(&CPU->saved_context); |
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE); |
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), |
(uintptr_t) CPU->stack, CPU_STACK_SIZE); |
context_restore(&CPU->saved_context); |
/* not reached */ |
} |
483,7 → 484,8 |
THREAD->state = Running; |
#ifdef SCHEDULER_VERBOSE |
printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy)); |
printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", |
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy)); |
#endif |
/* |
556,7 → 558,7 |
/* |
* Not interested in ourselves. |
* Doesn't require interrupt disabling for kcpulb is X_WIRED. |
 * Doesn't require interrupt disabling because kcpulb has THREAD_FLAG_WIRED. |
*/ |
if (CPU == cpu) |
continue; |
577,12 → 579,14 |
while (l != &r->rq_head) { |
t = list_get_instance(l, thread_t, rq_link); |
/* |
* We don't want to steal CPU-wired threads neither threads already stolen. |
* The latter prevents threads from migrating between CPU's without ever being run. |
* We don't want to steal threads whose FPU context is still in CPU. |
 * We don't want to steal CPU-wired threads, nor threads already |
 * stolen. The latter prevents threads from migrating between CPUs |
 * without ever being run. We don't want to steal threads whose FPU |
 * context is still in the CPU. |
*/ |
spinlock_lock(&t->lock); |
if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) { |
if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) && |
(!(t->fpu_context_engaged)) ) { |
/* |
* Remove t from r. |
*/ |
608,9 → 612,11 |
*/ |
spinlock_lock(&t->lock); |
#ifdef KCPULB_VERBOSE |
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active); |
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", |
CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), |
atomic_get(&nrdy) / config.cpu_active); |
#endif |
t->flags |= X_STOLEN; |
t->flags |= THREAD_FLAG_STOLEN; |
t->state = Entering; |
spinlock_unlock(&t->lock); |
/trunk/kernel/generic/src/proc/thread.c |
---|
129,6 → 129,9 |
link_initialize(&t->rq_link); |
link_initialize(&t->wq_link); |
link_initialize(&t->th_link); |
/* call the architecture-specific part of the constructor */ |
thr_constructor_arch(t); |
#ifdef ARCH_HAS_FPU |
# ifdef CONFIG_FPU_LAZY |
157,6 → 160,9 |
{ |
thread_t *t = (thread_t *) obj; |
/* call the architecture-specific part of the destructor */ |
thr_destructor_arch(t); |
frame_free(KA2PA(t->kstack)); |
#ifdef ARCH_HAS_FPU |
if (t->saved_fpu_context) |
210,7 → 216,7 |
i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority; |
cpu = CPU; |
if (t->flags & X_WIRED) { |
if (t->flags & THREAD_FLAG_WIRED) { |
cpu = t->cpu; |
} |
t->state = Ready; |
295,8 → 301,6 |
t = (thread_t *) slab_alloc(thread_slab, 0); |
if (!t) |
return NULL; |
thread_create_arch(t); |
/* Not needed, but good for debugging */ |
memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0); |
323,7 → 327,7 |
t->ticks = -1; |
t->priority = -1; /* start in rq[0] */ |
t->cpu = NULL; |
t->flags = 0; |
t->flags = flags; |
t->state = Entering; |
t->call_me = NULL; |
t->call_me_with = NULL; |
347,6 → 351,8 |
t->fpu_context_exists = 0; |
t->fpu_context_engaged = 0; |
thread_create_arch(t); /* might depend on previous initialization */ |
/* |
* Attach to the containing task. |
589,7 → 595,7 |
return (unative_t) rc; |
} |
if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) { |
if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf))) { |
tid = t->tid; |
thread_ready(t); |
return (unative_t) tid; |
/trunk/kernel/generic/src/mm/frame.c |
---|
928,7 → 928,6 |
* |
* @param order Allocate exactly 2^order frames. |
* @param flags Flags for host zone selection and address processing. |
* @param status Allocation status (FRAME_OK on success), unused if NULL. |
* @param pzone Preferred zone |
* |
* @return Physical address of the allocated frame. |
987,7 → 986,7 |
/** Free a frame. |
* |
* Find respective frame structure for supplied PFN. |
* Find respective frame structure for supplied physical frame address. |
* Decrement frame reference count. |
* If it drops to zero, move the frame structure to free list. |
* |