Subversion Repositories HelenOS-historic

Compare Revisions

Rev 786 → Rev 787

/kernel/trunk/generic/include/print.h
30,6 → 30,7
#define __PRINT_H__
 
#include <arch/types.h>
#include <synch/spinlock.h>
 
#define INT8 1
#define INT16 2
38,4 → 39,7
 
extern void printf(const char *fmt, ...);
 
/* We need this address in spinlock to avoid deadlock in deadlock detection */
extern spinlock_t printflock;
 
#endif
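The newly exported printflock is the lock printf holds while writing to the console; making its address visible lets the spinlock deadlock watchdog treat it specially (see the spinlock.c hunk further down). A minimal sketch of the assumed arrangement follows — printf's own body is not part of this diff, so the structure shown is an assumption, not the actual source:

/* Assumed shape of printf's locking, for illustration only. */
SPINLOCK_INITIALIZE(printflock);

void printf(const char *fmt, ...)
{
        spinlock_lock(&printflock);     /* serialize console output across CPUs */
        /* ... format fmt and emit the characters ... */
        spinlock_unlock(&printflock);
}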
/kernel/trunk/generic/include/proc/thread.h
134,5 → 134,6
 
extern void thread_register_call_me(void (* call_me)(void *), void *call_me_with);
extern void thread_print_list(void);
extern void thread_destroy(thread_t *t);
 
#endif
/kernel/trunk/generic/include/mm/slab.h
70,7 → 70,7
/* Configuration */
size_t size; /**< Size of SLAB position - align_up(sizeof(obj)) */
int (*constructor)(void *obj, int kmflag);
void (*destructor)(void *obj);
int (*destructor)(void *obj);
int flags; /**< Flags changing behaviour of cache */
 
/* Computed values */
103,7 → 103,7
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
void (*destructor)(void *obj),
int (*destructor)(void *obj),
int flags);
extern void slab_cache_destroy(slab_cache_t *cache);
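The destructor callback now returns an int rather than void; as the slab.c hunks below show, the returned value is added to the count of frames freed, so a destructor that releases backing frames reports how many it gave back. A hedged example under the new signature — the object type and field name here are hypothetical, not part of this changeset:

/* Hypothetical cache object backed by one kernel frame. */
typedef struct {
        __u8 *buf;                      /* one frame of storage */
} example_obj_t;

static int example_destructor(void *obj)
{
        example_obj_t *e = (example_obj_t *) obj;

        frame_free((__address) e->buf);
        return 1;                       /* one frame returned to the frame allocator */
}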
 
/kernel/trunk/generic/src/proc/scheduler.c
237,6 → 237,7
* using new stack. Handling the actual context
* switch to a new thread.
*
* Assume THREAD->lock is held.
*/
static void scheduler_separated_stack(void)
{
253,33 → 254,9
break;
 
case Exiting:
frame_free((__address) THREAD->kstack);
if (THREAD->ustack) {
frame_free((__address) THREAD->ustack);
}
 
/*
* Detach from the containing task.
*/
spinlock_lock(&TASK->lock);
list_remove(&THREAD->th_link);
spinlock_unlock(&TASK->lock);
 
spinlock_unlock(&THREAD->lock);
spinlock_lock(&threads_lock);
list_remove(&THREAD->threads_link);
spinlock_unlock(&threads_lock);
 
spinlock_lock(&CPU->lock);
if(CPU->fpu_owner==THREAD)
CPU->fpu_owner=NULL;
spinlock_unlock(&CPU->lock);
 
free(THREAD);
 
thread_destroy(THREAD);
break;
case Sleeping:
/*
* Prefer the thread after it's woken up.
/kernel/trunk/generic/src/proc/thread.c
52,6 → 52,8
#include <arch/atomic.h>
#include <memstr.h>
#include <print.h>
#include <mm/slab.h>
#include <debug.h>
 
char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */
 
61,7 → 63,9
SPINLOCK_INITIALIZE(tidlock);
__u32 last_tid = 0;
 
static slab_cache_t *thread_slab;
 
 
/** Thread wrapper
*
* This wrapper is provided to ensure that every thread
87,7 → 91,33
/* not reached */
}
 
/** Initialization and allocation for thread_t structure */
static int thr_constructor(void *obj, int kmflags)
{
thread_t *t = (thread_t *)obj;
 
spinlock_initialize(&t->lock, "thread_t_lock");
link_initialize(&t->rq_link);
link_initialize(&t->wq_link);
link_initialize(&t->th_link);
link_initialize(&t->threads_link);
t->kstack = (__u8 *)frame_alloc(ONE_FRAME, FRAME_KA | kmflags);
if (!t->kstack)
return -1;
 
return 0;
}
 
/** Destruction of thread_t object */
static int thr_destructor(void *obj)
{
thread_t *t = (thread_t *)obj;
 
frame_free((__address) t->kstack);
return 1; /* One page freed */
}
 
/** Initialize threads
*
* Initialize kernel threads support.
97,6 → 127,9
{
THREAD = NULL;
atomic_set(&nrdy,0);
thread_slab = slab_cache_create("thread_slab",
sizeof(thread_t),0,
thr_constructor, thr_destructor, 0);
}
 
 
143,6 → 176,43
}
 
 
/** Destroy thread memory structure
*
* Detach thread from all queues, cpus etc. and destroy it.
*
* Assume thread->lock is held!!
*/
void thread_destroy(thread_t *t)
{
ASSERT(t->state == Exiting);
ASSERT(t->task);
ASSERT(t->cpu);
 
spinlock_lock(&t->cpu->lock);
if(t->cpu->fpu_owner==t)
t->cpu->fpu_owner=NULL;
spinlock_unlock(&t->cpu->lock);
 
if (t->ustack)
frame_free((__address) t->ustack);
/*
* Detach from the containing task.
*/
spinlock_lock(&t->task->lock);
list_remove(&t->th_link);
spinlock_unlock(&t->task->lock);
spinlock_unlock(&t->lock);
spinlock_lock(&threads_lock);
list_remove(&t->threads_link);
spinlock_unlock(&threads_lock);
slab_free(thread_slab, t);
}
 
 
/** Create new thread
*
* Create a new thread.
158,19 → 228,19
thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags)
{
thread_t *t;
__address frame_ks, frame_us = NULL;
__address frame_us = NULL;
 
t = (thread_t *) malloc(sizeof(thread_t));
t = (thread_t *) slab_alloc(thread_slab, 0);
if (t) {
ipl_t ipl;
spinlock_initialize(&t->lock, "thread_t_lock");
frame_ks = frame_alloc(ONE_FRAME, FRAME_KA);
if (THREAD_USER_STACK & flags) {
frame_us = frame_alloc(ONE_FRAME, FRAME_KA);
}
 
/* Not needed, but good for debugging */
memsetb((__address)t->kstack, THREAD_STACK_SIZE, 0);
 
ipl = interrupts_disable();
spinlock_lock(&tidlock);
t->tid = ++last_tid;
177,12 → 247,6
spinlock_unlock(&tidlock);
interrupts_restore(ipl);
memsetb(frame_ks, THREAD_STACK_SIZE, 0);
link_initialize(&t->rq_link);
link_initialize(&t->wq_link);
link_initialize(&t->th_link);
link_initialize(&t->threads_link);
t->kstack = (__u8 *) frame_ks;
t->ustack = (__u8 *) frame_us;
context_save(&t->saved_context);
218,7 → 282,7
/*
* Register this thread in the system-wide list.
*/
ipl = interrupts_disable();
ipl = interrupts_disable();
spinlock_lock(&threads_lock);
list_append(&t->threads_link, &threads_head);
spinlock_unlock(&threads_lock);
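Read together, the thread.c hunks establish the usage pattern for the new cache: create it once in thread_init() with a constructor and an int-returning destructor, take objects with slab_alloc() and hand them back with slab_free(), and let the expensive per-object state (the kernel stack) stay allocated inside the cache between uses. A condensed sketch of that pattern using the names from this diff; the grab_thread()/drop_thread() wrappers are illustrative only:

static slab_cache_t *thread_slab;       /* created once in thread_init() */

thread_t *grab_thread(void)
{
        /* Reuses a cached thread_t whose kstack was already allocated
         * by thr_constructor(); only fresh slab pages pay that cost. */
        return (thread_t *) slab_alloc(thread_slab, 0);
}

void drop_thread(thread_t *t)
{
        /* The object returns to the cache with its kstack intact; the
         * stack frame is released only when the slab page is reclaimed
         * and thr_destructor() runs. */
        slab_free(thread_slab, t);
}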
/kernel/trunk/generic/src/synch/spinlock.c
68,7 → 68,7
 
preemption_disable();
while (test_and_set(&sl->val)) {
if (i++ > 300000) {
if (i++ > 300000 && sl!=&printflock) {
printf("cpu%d: looping on spinlock %p:%s, caller=%p",
CPU->id, sl, sl->name, CALLER);
symbol = get_symtab_entry(CALLER);
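The added sl != &printflock test exists because the watchdog reports through printf, and printf itself takes printflock: a CPU already looping on printflock would only try to take the same lock again inside the report. A loose sketch of how the slow path reads after this change — only the condition quoted in the hunk above is new; the function name and the declarations around it are reconstructed here and may not match spinlock.c exactly:

void spinlock_lock_debug(spinlock_t *sl)
{
        count_t i = 0;
        char *symbol;

        preemption_disable();
        while (test_and_set(&sl->val)) {
                /* Skip the report for printflock: printing the warning
                 * would itself need printflock and could never complete. */
                if (i++ > 300000 && sl != &printflock) {
                        printf("cpu%d: looping on spinlock %p:%s, caller=%p",
                                CPU->id, sl, sl->name, CALLER);
                        symbol = get_symtab_entry(CALLER);
                        /* ... print the symbol name, reset the counter ... */
                        i = 0;
                }
        }
}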
/kernel/trunk/generic/src/mm/slab.c
228,6 → 228,8
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
slab_t *slab)
{
int freed = 0;
 
if (!slab)
slab = obj2slab(obj);
 
234,6 → 236,9
ASSERT(slab->cache == cache);
ASSERT(slab->available < cache->objects);
 
if (cache->destructor)
freed = cache->destructor(obj);
spinlock_lock(&cache->slablock);
 
*((int *)obj) = slab->nextavail;
246,7 → 251,7
list_remove(&slab->link);
spinlock_unlock(&cache->slablock);
 
return slab_space_free(cache, slab);
return freed + slab_space_free(cache, slab);
 
} else if (slab->available == 1) {
/* It was in full, move to partial */
254,7 → 259,7
list_prepend(&slab->link, &cache->partial_slabs);
}
spinlock_unlock(&cache->slablock);
return 0;
return freed;
}
 
/**
290,6 → 295,7
obj = slab->start + slab->nextavail * cache->size;
slab->nextavail = *((int *)obj);
slab->available--;
 
if (! slab->available)
list_prepend(&slab->link, &cache->full_slabs);
else
296,6 → 302,12
list_prepend(&slab->link, &cache->partial_slabs);
 
spinlock_unlock(&cache->slablock);
 
if (cache->constructor && cache->constructor(obj, flags)) {
/* Bad, bad, construction failed */
slab_obj_destroy(cache, obj, slab);
return NULL;
}
return obj;
}
 
531,7 → 543,7
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
void (*destructor)(void *obj),
int (*destructor)(void *obj),
int flags)
{
int i;
596,7 → 608,7
size_t size,
size_t align,
int (*constructor)(void *obj, int kmflag),
void (*destructor)(void *obj),
int (*destructor)(void *obj),
int flags)
{
slab_cache_t *cache;