Subversion Repositories HelenOS

Compare Revisions

Rev 1853 → Rev 1854

/trunk/kernel/generic/include/proc/thread.h
52,7 → 52,7
 
#define THREAD_STACK_SIZE STACK_SIZE
 
/**< Thread states. */
/** Thread states. */
enum state {
Invalid, /**< It is an error if a thread is found in this state. */
Running, /**< State of a thread that is currently executing on some CPU. */
65,7 → 65,7
 
extern char *thread_states[];
 
/**< Join types. */
/** Join types. */
typedef enum {
None,
TaskClnp, /**< The thread will be joined by ktaskclnp thread. */
72,8 → 72,10
TaskGC /**< The thread will be joined by ktaskgc thread. */
} thread_join_type_t;
 
#define X_WIRED (1<<0)
#define X_STOLEN (1<<1)
/* Thread flags */
#define THREAD_FLAG_WIRED (1<<0) /**< Thread cannot be migrated to another CPU. */
#define THREAD_FLAG_STOLEN (1<<1) /**< Thread was migrated to another CPU and has not run yet. */
#define THREAD_FLAG_USPACE (1<<2) /**< Thread executes in userspace. */
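
For orientation, a minimal sketch of how the renamed flags are used after this revision; it simply mirrors the kinit.c and thread.c hunks further below (the flag is now passed to thread_create() instead of being OR-ed into t->flags by the caller):

if ((t = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp"))) {
	spinlock_lock(&t->lock);
	t->cpu = &cpus[0];	/* wire the thread to the boot CPU */
	spinlock_unlock(&t->lock);
	thread_ready(t);
}

/* Consumers test the flags, e.g. in thread_ready(): */
if (t->flags & THREAD_FLAG_WIRED)
	cpu = t->cpu;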
 
#define THREAD_NAME_BUFLEN 20
 
127,7 → 129,7
/*
* Defined only if thread doesn't run.
* It means that the FPU context is in the CPU that last executed this thread.
* This disables migration
* This disables migration.
*/
int fpu_context_engaged;
 
149,7 → 151,7
thread_arch_t arch; /**< Architecture-specific data. */
 
uint8_t *kstack; /**< Thread's kernel stack. */
};
 
/** Thread list lock.
170,6 → 172,12
#ifndef thread_create_arch
extern void thread_create_arch(thread_t *t);
#endif
#ifndef thr_constructor_arch
extern void thr_constructor_arch(thread_t *t);
#endif
#ifndef thr_destructor_arch
extern void thr_destructor_arch(thread_t *t);
#endif
 
extern void thread_sleep(uint32_t sec);
extern void thread_usleep(uint32_t usec);
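
The #ifndef guards above let each architecture choose between a real hook and a compile-time no-op; both options appear later in this revision, so here they are side by side:

/* Option 1: define the hooks away as empty macros (ia64, ppc32, ppc64, mips32 below). */
#define thr_constructor_arch(t)
#define thr_destructor_arch(t)

/* Option 2: leave the names undefined and implement real functions (sparc64 below). */
void thr_constructor_arch(thread_t *t)
{
	t->arch.uspace_window_buffer = NULL;
}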
/trunk/kernel/generic/include/mm/frame.h
27,14 → 27,14
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup genericmm
* @{
*/
/** @file
*/
 
#ifndef __FRAME_H__
#define __FRAME_H__
#ifndef KERN_FRAME_H_
#define KERN_FRAME_H_
 
#include <arch/types.h>
#include <typedefs.h>
61,10 → 61,6
#define FRAME_ATOMIC 0x2 /* do not panic and do not sleep on failure */
#define FRAME_NO_RECLAIM 0x4 /* do not start reclaiming when no free memory */
 
#define FRAME_OK 0 /* frame_alloc return status */
#define FRAME_NO_MEMORY 1 /* frame_alloc return status */
#define FRAME_ERROR 2 /* frame_alloc return status */
 
static inline uintptr_t PFN2ADDR(pfn_t frame)
{
return (uintptr_t)(frame << FRAME_WIDTH);
88,7 → 84,7
#define IS_BUDDY_LEFT_BLOCK_ABS(zone, frame) (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 0)
#define IS_BUDDY_RIGHT_BLOCK_ABS(zone, frame) (((frame_index_abs((zone), (frame)) >> (frame)->buddy_order) & 0x1) == 1)
 
#define frame_alloc(order, flags) frame_alloc_generic(order, flags, NULL)
 
extern void frame_init(void);
extern void * frame_alloc_generic(uint8_t order, int flags, int *pzone);
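
With the FRAME_OK/FRAME_NO_MEMORY/FRAME_ERROR status codes removed above, a hypothetical caller sketch; it assumes that a FRAME_ATOMIC allocation reports failure by returning NULL, in line with the flag's "do not panic and do not sleep on failure" comment (the diff does not spell out the failure convention explicitly):

static void *grab_frame_atomically(void)
{
	/* Assumption: with FRAME_ATOMIC, failure shows up as a NULL return. */
	void *frame = frame_alloc(ONE_FRAME, FRAME_ATOMIC);
	if (!frame)
		return NULL;	/* out of memory; caller backs off instead of sleeping */
	return frame;
}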
111,6 → 107,5
 
#endif
 
/** @}
*/
 
/trunk/kernel/generic/src/main/kinit.c
100,9 → 100,8
* not mess together with kcpulb threads.
* Just a beautification.
*/
if ((t = thread_create(kmp, NULL, TASK, 0, "kmp"))) {
if ((t = thread_create(kmp, NULL, TASK, THREAD_FLAG_WIRED, "kmp"))) {
spinlock_lock(&t->lock);
t->flags |= X_WIRED;
t->cpu = &cpus[0];
spinlock_unlock(&t->lock);
thread_ready(t);
126,9 → 125,8
*/
for (i = 0; i < config.cpu_count; i++) {
 
if ((t = thread_create(kcpulb, NULL, TASK, 0, "kcpulb"))) {
if ((t = thread_create(kcpulb, NULL, TASK, THREAD_FLAG_WIRED, "kcpulb"))) {
spinlock_lock(&t->lock);
t->flags |= X_WIRED;
t->cpu = &cpus[i];
spinlock_unlock(&t->lock);
thread_ready(t);
/trunk/kernel/generic/src/proc/scheduler.c
141,8 → 141,7
/* Might sleep */
spinlock_unlock(&THREAD->lock);
spinlock_unlock(&CPU->lock);
THREAD->saved_fpu_context = slab_alloc(fpu_context_slab,
0);
THREAD->saved_fpu_context = slab_alloc(fpu_context_slab, 0);
/* We may have switched CPUs during slab_alloc */
goto restart;
}
235,9 → 234,10
t->priority = i; /* correct rq index */
 
/*
* Clear the X_STOLEN flag so that t can be migrated when load balancing needs emerge.
* Clear the THREAD_FLAG_STOLEN flag so that t can be migrated
* when load balancing needs emerge.
*/
t->flags &= ~X_STOLEN;
t->flags &= ~THREAD_FLAG_STOLEN;
spinlock_unlock(&t->lock);
 
return t;
349,7 → 349,8
* scheduler_separated_stack().
*/
context_save(&CPU->saved_context);
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack), (uintptr_t) CPU->stack, CPU_STACK_SIZE);
context_set(&CPU->saved_context, FADDR(scheduler_separated_stack),
(uintptr_t) CPU->stack, CPU_STACK_SIZE);
context_restore(&CPU->saved_context);
/* not reached */
}
483,7 → 484,8
THREAD->state = Running;
 
#ifdef SCHEDULER_VERBOSE
printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n", CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
printf("cpu%d: tid %d (priority=%d,ticks=%lld,nrdy=%ld)\n",
CPU->id, THREAD->tid, THREAD->priority, THREAD->ticks, atomic_get(&CPU->nrdy));
#endif
 
/*
556,7 → 558,7
 
/*
* Not interested in ourselves.
* Doesn't require interrupt disabling for kcpulb is X_WIRED.
* Doesn't require interrupt disabling for kcpulb has THREAD_FLAG_WIRED.
*/
if (CPU == cpu)
continue;
577,12 → 579,14
while (l != &r->rq_head) {
t = list_get_instance(l, thread_t, rq_link);
/*
* We don't want to steal CPU-wired threads neither threads already stolen.
* The latter prevents threads from migrating between CPU's without ever being run.
* We don't want to steal threads whose FPU context is still in CPU.
* We don't want to steal CPU-wired threads nor threads already
* stolen. The latter prevents threads from migrating between CPUs
* without ever being run. We don't want to steal threads whose FPU
* context is still in the CPU.
*/
spinlock_lock(&t->lock);
if ( (!(t->flags & (X_WIRED | X_STOLEN))) && (!(t->fpu_context_engaged)) ) {
if ((!(t->flags & (THREAD_FLAG_WIRED | THREAD_FLAG_STOLEN))) &&
(!(t->fpu_context_engaged)) ) {
/*
* Remove t from r.
*/
608,9 → 612,11
*/
spinlock_lock(&t->lock);
#ifdef KCPULB_VERBOSE
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n", CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy), atomic_get(&nrdy) / config.cpu_active);
printf("kcpulb%d: TID %d -> cpu%d, nrdy=%ld, avg=%nd\n",
CPU->id, t->tid, CPU->id, atomic_get(&CPU->nrdy),
atomic_get(&nrdy) / config.cpu_active);
#endif
t->flags |= X_STOLEN;
t->flags |= THREAD_FLAG_STOLEN;
t->state = Entering;
spinlock_unlock(&t->lock);
/trunk/kernel/generic/src/proc/thread.c
129,6 → 129,9
link_initialize(&t->rq_link);
link_initialize(&t->wq_link);
link_initialize(&t->th_link);
 
/* call the architecture-specific part of the constructor */
thr_constructor_arch(t);
#ifdef ARCH_HAS_FPU
# ifdef CONFIG_FPU_LAZY
157,6 → 160,9
{
thread_t *t = (thread_t *) obj;
 
/* call the architecture-specific part of the destructor */
thr_destructor_arch(t);
 
frame_free(KA2PA(t->kstack));
#ifdef ARCH_HAS_FPU
if (t->saved_fpu_context)
210,7 → 216,7
i = (t->priority < RQ_COUNT -1) ? ++t->priority : t->priority;
cpu = CPU;
if (t->flags & X_WIRED) {
if (t->flags & THREAD_FLAG_WIRED) {
cpu = t->cpu;
}
t->state = Ready;
295,8 → 301,6
t = (thread_t *) slab_alloc(thread_slab, 0);
if (!t)
return NULL;
 
thread_create_arch(t);
/* Not needed, but good for debugging */
memsetb((uintptr_t) t->kstack, THREAD_STACK_SIZE * 1 << STACK_FRAMES, 0);
323,7 → 327,7
t->ticks = -1;
t->priority = -1; /* start in rq[0] */
t->cpu = NULL;
t->flags = 0;
t->flags = flags;
t->state = Entering;
t->call_me = NULL;
t->call_me_with = NULL;
347,6 → 351,8
t->fpu_context_exists = 0;
t->fpu_context_engaged = 0;
 
thread_create_arch(t); /* might depend on previous initialization */
/*
* Attach to the containing task.
589,7 → 595,7
return (unative_t) rc;
}
 
if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
if ((t = thread_create(uinit, kernel_uarg, TASK, THREAD_FLAG_USPACE, namebuf))) {
tid = t->tid;
thread_ready(t);
return (unative_t) tid;
/trunk/kernel/generic/src/mm/frame.c
928,7 → 928,6
*
* @param order Allocate exactly 2^order frames.
* @param flags Flags for host zone selection and address processing.
* @param status Allocation status (FRAME_OK on success), unused if NULL.
* @param pzone Preferred zone
*
* @return Physical address of the allocated frame.
987,7 → 986,7
 
/** Free a frame.
*
* Find respective frame structure for supplied PFN.
* Find respective frame structure for supplied physical frame address.
* Decrement frame reference count.
* If it drops to zero, move the frame structure to free list.
*
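
A schematic rendering of the algorithm this comment describes; the helper and field names (lookup_frame_structure, refcount, zone_return_to_free_list) are illustrative stand-ins, not the real kernel API, and zone lookup and buddy merging are omitted:

void frame_free_sketch(uintptr_t addr)
{
	frame_t *frame = lookup_frame_structure(addr);	/* hypothetical: find frame_t for the address */

	if (--frame->refcount == 0) {
		/* Last reference gone: hand the frame back to its zone's free list. */
		zone_return_to_free_list(frame);	/* hypothetical helper */
	}
}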
/trunk/kernel/arch/sparc64/include/arch.h
29,7 → 29,9
/** @addtogroup sparc64
* @{
*/
/** @file
/**
* @file
* @brief Various sparc64-specific macros.
*/
 
#ifndef __sparc64_ARCH_H__
38,8 → 40,9
#define ASI_AIUP 0x10 /** Access to primary context with user privileges. */
#define ASI_AIUS 0x11 /** Access to secondary context with user privileges. */
 
#define NWINDOW 8 /** Number of register window sets. */
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/sparc64/include/proc/thread.h
26,22 → 26,24
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64proc
* @{
*/
/** @file
*/
 
#ifndef __sparc64_THREAD_H__
#define __sparc64_THREAD_H__
#ifndef KERN_sparc64_THREAD_H_
#define KERN_sparc64_THREAD_H_
 
#include <arch/types.h>
#include <arch/arch.h>
 
typedef struct {
/** Buffer for register windows with userspace content. */
uint8_t *uspace_window_buffer;
} thread_arch_t;
 
#define thread_create_arch(t)
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/sparc64/include/trap/regwin.h
50,8 → 50,6
#define SPILL_HANDLER_SIZE REGWIN_HANDLER_SIZE
#define FILL_HANDLER_SIZE REGWIN_HANDLER_SIZE
 
#define NWINDOW 8
 
/* Window Save Area offsets. */
#define L0_OFFSET 0
#define L1_OFFSET 8
249,4 → 247,3
 
/** @}
*/
 
/trunk/kernel/arch/sparc64/include/barrier.h
51,10 → 51,14
/*
* The FLUSH instruction takes an address parameter.
* As such, it may trap if the address is not found in DTLB.
* However, JPS1 implementations are free to ignore the trap.
*
* The entire kernel text is mapped by a locked ITLB and
* DTLB entries. Therefore, when this function is called,
* the %o7 register will always be in the range mapped by
* DTLB.
*/
__asm__ volatile ("flush %0\n" :: "r" (0x400000));
__asm__ volatile ("flush %o7\n");
}
 
/** Memory Barrier instruction. */
/trunk/kernel/arch/sparc64/Makefile.inc
94,6 → 94,7
arch/$(ARCH)/src/sparc64.c \
arch/$(ARCH)/src/start.S \
arch/$(ARCH)/src/proc/scheduler.c \
arch/$(ARCH)/src/proc/thread.c \
arch/$(ARCH)/src/trap/trap_table.S \
arch/$(ARCH)/src/trap/trap.c \
arch/$(ARCH)/src/trap/exception.c \
/trunk/kernel/arch/sparc64/src/proc/thread.c
0,0 → 1,65
/*
* Copyright (C) 2006 Jakub Jermar
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* - Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* - The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup sparc64proc
* @{
*/
/** @file
*/
 
#include <proc/thread.h>
#include <arch/proc/thread.h>
#include <mm/frame.h>
 
void thr_constructor_arch(thread_t *t)
{
/*
* The userspace window buffer is not allocated here; it is
* allocated on demand in thread_create_arch().
*/
t->arch.uspace_window_buffer = NULL;
}
 
void thr_destructor_arch(thread_t *t)
{
if (t->arch.uspace_window_buffer)
frame_free((uintptr_t) t->arch.uspace_window_buffer);
}
 
void thread_create_arch(thread_t *t)
{
if ((t->flags & THREAD_FLAG_USPACE) && (!t->arch.uspace_window_buffer)) {
/*
* The thread needs a userspace window buffer and the object
* returned from the slab allocator doesn't have one.
*/
t->arch.uspace_window_buffer = frame_alloc(ONE_FRAME, 0);
}
}
 
/** @}
*/
/trunk/kernel/arch/ia64/include/proc/thread.h
26,22 → 26,23
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia64proc
* @{
*/
/** @file
*/
 
#ifndef __ia64_THREAD_H__
#define __ia64_THREAD_H__
#ifndef KERN_ia64_THREAD_H_
#define KERN_ia64_THREAD_H_
 
typedef struct {
} thread_arch_t;
 
#define thr_constructor_arch(t)
#define thr_destructor_arch(t)
#define thread_create_arch(t)
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/ppc32/include/proc/thread.h
26,22 → 26,23
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ppc32proc
* @{
*/
/** @file
*/
 
#ifndef __ppc32_THREAD_H__
#define __ppc32_THREAD_H__
#ifndef KERN_ppc32_THREAD_H_
#define KERN_ppc32_THREAD_H_
 
typedef struct {
} thread_arch_t;
 
#define thr_constructor_arch(t)
#define thr_destructor_arch(t)
#define thread_create_arch(t)
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/amd64/include/proc/task.h
26,14 → 26,14
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64proc
* @{
*/
/** @file
*/
 
#ifndef __amd64_TASK_H__
#define __amd64_TASK_H__
#ifndef KERN_amd64_TASK_H_
#define KERN_amd64_TASK_H_
 
#include <typedefs.h>
#include <arch/types.h>
46,6 → 46,5
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/amd64/include/proc/thread.h
26,14 → 26,14
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64proc
* @{
*/
/** @file
*/
 
#ifndef __amd64_THREAD_H__
#define __amd64_THREAD_H__
#ifndef KERN_amd64_THREAD_H_
#define KERN_amd64_THREAD_H_
 
#include <arch/types.h>
 
41,8 → 41,10
unative_t tls;
} thread_arch_t;
 
#define thr_constructor_arch(t)
#define thr_destructor_arch(t)
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/amd64/src/proc/thread.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup amd64proc
* @{
*/
/** @file
43,6 → 43,5
t->arch.tls = 0;
}
 
/** @}
*/
 
/trunk/kernel/arch/ppc64/include/proc/thread.h
26,22 → 26,24
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ppc64proc
* @{
*/
/** @file
*/
 
#ifndef __ppc64_THREAD_H__
#define __ppc64_THREAD_H__
#ifndef KERN_ppc64_THREAD_H_
#define KERN_ppc64_THREAD_H_
 
typedef struct {
} thread_arch_t;
 
#define thr_constructor_arch(t)
#define thr_destructor_arch(t)
#define thread_create_arch(t)
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/mips32/include/proc/thread.h
26,22 → 26,24
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup mips32proc
* @{
*/
/** @file
*/
 
#ifndef __mips32_THREAD_H__
#define __mips32_THREAD_H__
#ifndef KERN_mips32_THREAD_H_
#define KERN_mips32_THREAD_H_
 
typedef struct {
} thread_arch_t;
 
#define thr_constructor_arch(t)
#define thr_destructor_arch(t)
#define thread_create_arch(t)
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/ia32/include/proc/task.h
26,14 → 26,14
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32proc
* @{
*/
/** @file
*/
 
#ifndef __ia32_TASK_H__
#define __ia32_TASK_H__
#ifndef KERN_ia32_TASK_H_
#define KERN_ia32_TASK_H_
 
#include <typedefs.h>
#include <arch/types.h>
46,6 → 46,5
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/ia32/include/proc/thread.h
26,14 → 26,14
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32proc
* @{
*/
/** @file
*/
 
#ifndef __ia32_THREAD_H__
#define __ia32_THREAD_H__
#ifndef KERN_ia32_THREAD_H_
#define KERN_ia32_THREAD_H_
 
#include <arch/types.h>
 
41,8 → 41,10
unative_t tls;
} thread_arch_t;
 
#define thr_constructor_arch(t)
#define thr_destructor_arch(t)
 
#endif
 
/** @}
*/
 
/trunk/kernel/arch/ia32/src/proc/scheduler.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32proc
* @{
*/
/** @file
77,6 → 77,5
{
}
 
/** @}
*/
 
/trunk/kernel/arch/ia32/src/proc/task.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32proc
* @{
*/
/** @file
57,6 → 57,5
free(t->arch.iomap.map);
}
 
/** @}
*/
 
/trunk/kernel/arch/ia32/src/proc/thread.c
26,7 → 26,7
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
 
/** @addtogroup ia32proc
* @{
*/
/** @file
43,6 → 43,6
t->arch.tls = 0;
}
 
/** @}
*/