/* Source: HelenOS Subversion repository (kernel thread.h, rev 906); web-viewer chrome and per-line revision annotations stripped. */
/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef __THREAD_H__
#define __THREAD_H__

#include <arch/thread.h>
#include <synch/spinlock.h>
#include <arch/context.h>
#include <fpu_context.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <synch/rwlock.h>
#include <config.h>
#include <adt/list.h>
#include <mm/slab.h>

/** Size of a thread's kernel stack; STACK_SIZE comes from <config.h>. */
#define THREAD_STACK_SIZE	STACK_SIZE

/** Thread creation flag; NOTE(review): presumably requests a userspace stack for the new thread — confirm against thread_create() callers. */
#define THREAD_USER_STACK	1
/** States a thread can be in during its lifetime. */
enum state {
	Invalid,	/**< It is an error if a thread is found in this state. */
	Running,	/**< State of a thread that is currently executing on some CPU. */
	Sleeping,	/**< Thread in this state is waiting for an event. */
	Ready,		/**< State of threads in a run queue. */
	Entering,	/**< Threads are in this state before they are first readied. */
	Exiting		/**< After a thread calls thread_exit(), it is put into Exiting state. */
};
/** Textual names of thread states; NOTE(review): presumably indexed by state_t — confirm in the defining .c file. */
extern char *thread_states[];

/* Thread flags (stored in thread_t.flags). */
#define X_WIRED		(1<<0)	/**< NOTE(review): presumably pins the thread to its CPU (no migration) — confirm in scheduler. */
#define X_STOLEN	(1<<1)	/**< NOTE(review): presumably set when the load balancer steals the thread from another CPU — confirm. */
62
struct thread {
410 jermar 63
	link_t rq_link;				/**< Run queue link. */
64
	link_t wq_link;				/**< Wait queue link. */
65
	link_t th_link;				/**< Links to threads within containing task. */
66
	link_t threads_link;			/**< Link to the list of all threads. */
1 jermar 67
 
557 jermar 68
	/** Lock protecting thread structure.
69
	 *
70
	 * Protects the whole thread structure except list links above.
71
	 * Must be acquired before T.lock for each T of type task_t.
72
	 * 
73
	 */
623 jermar 74
	SPINLOCK_DECLARE(lock);
1 jermar 75
 
410 jermar 76
	void (* thread_code)(void *);		/**< Function implementing the thread. */
77
	void *thread_arg;			/**< Argument passed to thread_code() function. */
1 jermar 78
 
410 jermar 79
	context_t saved_context;		/**< From here, the stored context is restored when the thread is scheduled. */
80
	context_t sleep_timeout_context;	/**< From here, the stored failover context is restored when sleep times out. */
81
 
82
	waitq_t *sleep_queue;			/**< Wait queue in which this thread sleeps. */
83
	timeout_t sleep_timeout;		/**< Timeout used for timeoutable sleeping.  */
84
	volatile int timeout_pending;		/**< Flag signalling sleep timeout in progress. */
85
 
906 palkovsky 86
	fpu_context_t *saved_fpu_context;
125 jermar 87
	int fpu_context_exists;
88
 
89
	/*
90
	 * Defined only if thread doesn't run.
91
	 * It means that fpu context is in CPU that last time executes this thread.
92
	 * This disables migration
93
	 */
94
	int fpu_context_engaged;
95
 
1 jermar 96
	rwlock_type_t rwlock_holder_type;
97
 
410 jermar 98
	void (* call_me)(void *);		/**< Funtion to be called in scheduler before the thread is put asleep. */
99
	void *call_me_with;			/**< Argument passed to call_me(). */
100
 
101
	state_t state;				/**< Thread's state. */
102
	int flags;				/**< Thread's flags. */
1 jermar 103
 
410 jermar 104
	cpu_t *cpu;				/**< Thread's CPU. */
105
	task_t *task;				/**< Containing task. */
1 jermar 106
 
410 jermar 107
	__u64 ticks;				/**< Ticks before preemption. */
1 jermar 108
 
413 jermar 109
	int priority;				/**< Thread's priority. Implemented as index to CPU->rq */
410 jermar 110
	__u32 tid;				/**< Thread ID. */
1 jermar 111
 
410 jermar 112
	ARCH_THREAD_DATA;			/**< Architecture-specific data. */
1 jermar 113
 
410 jermar 114
	__u8 *kstack;				/**< Thread's kernel stack. */
1 jermar 115
};
116
 
557 jermar 117
/** Thread list lock.
118
 *
119
 * This lock protects all link_t structures chained in threads_head.
120
 * Must be acquired before T.lock for each T of type thread_t.
121
 *
122
 */
123
extern spinlock_t threads_lock;
124
 
410 jermar 125
extern link_t threads_head;			/**< List of all threads in the system. */
1 jermar 126
 
127
extern void thread_init(void);
128
extern thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags);
129
extern void thread_ready(thread_t *t);
130
extern void thread_exit(void);
131
 
132
extern void thread_sleep(__u32 sec);
133
extern void thread_usleep(__u32 usec);
134
 
135
extern void thread_register_call_me(void (* call_me)(void *), void *call_me_with);
777 palkovsky 136
extern void thread_print_list(void);
787 palkovsky 137
extern void thread_destroy(thread_t *t);
1 jermar 138
 
906 palkovsky 139
 
140
/* Fpu context slab cache */
141
extern slab_cache_t *fpu_context_slab;
142
 
1 jermar 143
#endif