/*
 * Copyright (C) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef __THREAD_H__
#define __THREAD_H__

#include <arch/proc/thread.h>
#include <synch/spinlock.h>
#include <arch/context.h>
#include <fpu_context.h>
#include <arch/types.h>
#include <typedefs.h>
#include <time/timeout.h>
#include <synch/rwlock.h>
#include <synch/synch.h>
#include <config.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <mm/slab.h>
#include <proc/uarg.h>

#define THREAD_STACK_SIZE   STACK_SIZE

/** Thread states. */
enum state {
    Invalid,    /**< It is an error if a thread is found in this state. */
    Running,    /**< State of a thread that is currently executing on some CPU. */
    Sleeping,   /**< A thread in this state is waiting for an event. */
    Ready,      /**< State of threads in a run queue. */
    Entering,   /**< Threads are in this state before they are first readied. */
    Exiting,    /**< After a thread calls thread_exit(), it is put into the Exiting state. */
    Undead      /**< Threads that exited but were not detached are in the Undead state. */
};

/** Textual names of the thread states above. */
extern char *thread_states[];
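/*
 * Illustrative sketch: thread_states[] is assumed to map a thread's state_t
 * value to a printable name, and the kernel printf() from <print.h> is
 * assumed to be available. The caller is assumed to hold t->lock so that
 * t->state cannot change underneath it; print_thread_state() is a
 * hypothetical helper, not part of this interface.
 *
 *     static void print_thread_state(thread_t *t)
 *     {
 *         printf("%s: %s\n", t->name, thread_states[t->state]);
 *     }
 */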
/** Join types. */
typedef enum {
    None,
    TaskClnp,   /**< The thread will be joined by the ktaskclnp thread. */
    TaskGC      /**< The thread will be joined by the ktaskgc thread. */
} thread_join_type_t;

#define X_WIRED     (1<<0)
#define X_STOLEN    (1<<1)

#define THREAD_NAME_BUFLEN  20

/** Thread structure. There is one per thread. */
struct thread {
    link_t rq_link;             /**< Run queue link. */
    link_t wq_link;             /**< Wait queue link. */
    link_t th_link;             /**< Links to threads within the containing task. */

    /** Lock protecting the thread structure.
     *
     * Protects the whole thread structure except the list links above.
     */
    SPINLOCK_DECLARE(lock);

    char name[THREAD_NAME_BUFLEN];

    void (* thread_code)(void *);       /**< Function implementing the thread. */
    void *thread_arg;           /**< Argument passed to thread_code(). */

    /** From here, the stored context is restored when the thread is scheduled. */
    context_t saved_context;
    /** From here, the stored timeout context is restored when a sleep times out. */
    context_t sleep_timeout_context;
    /** From here, the stored interruption context is restored when a sleep is interrupted. */
    context_t sleep_interruption_context;

    bool sleep_interruptible;       /**< If true, the thread can be interrupted from sleep. */
    waitq_t *sleep_queue;           /**< Wait queue in which this thread sleeps. */
    timeout_t sleep_timeout;        /**< Timeout used for timeoutable sleeping. */
    volatile int timeout_pending;       /**< Flag signalling that a sleep timeout is in progress. */

    /** True if this thread is executing copy_from_uspace(). False otherwise. */
    bool in_copy_from_uspace;
    /** True if this thread is executing copy_to_uspace(). False otherwise. */
    bool in_copy_to_uspace;

    /**
     * If true, the thread will not go to sleep at all and will
     * call thread_exit() before returning to userspace.
     */
    bool interrupted;

    thread_join_type_t join_type;   /**< Who joins the thread. */
    bool detached;              /**< If true, thread_join_timeout() cannot be used on this thread. */
    waitq_t join_wq;            /**< Wait queue for thread_join_timeout(). */

    fpu_context_t *saved_fpu_context;
    int fpu_context_exists;

    /*
     * Defined only when the thread is not running.
     * It means that the FPU context is in the CPU that executed this thread last.
     * This disables migration.
     */
    int fpu_context_engaged;

    rwlock_type_t rwlock_holder_type;

    void (* call_me)(void *);       /**< Function to be called by the scheduler before the thread is put to sleep. */
    void *call_me_with;         /**< Argument passed to call_me(). */

    state_t state;              /**< Thread's state. */
    int flags;              /**< Thread's flags. */

    cpu_t *cpu;             /**< Thread's CPU. */
    task_t *task;               /**< Containing task. */

    __u64 ticks;                /**< Ticks before preemption. */

    int priority;               /**< Thread's priority. Implemented as an index into CPU->rq. */
    __u32 tid;              /**< Thread ID. */

    thread_arch_t arch;         /**< Architecture-specific data. */

    __u8 *kstack;               /**< Thread's kernel stack. */
};

/** Thread list lock.
 *
 * This lock protects all threads registered in threads_btree (below).
 * It must be acquired before T.lock for each T of type thread_t.
 */
extern spinlock_t threads_lock;

extern btree_t threads_btree;           /**< B+tree containing all threads. */
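/*
 * Locking sketch: threads_lock must be taken before any individual thread's
 * lock, as stated above. The fragment below is illustrative only; it assumes
 * the usual HelenOS spinlock discipline (interrupts_disable()/
 * interrupts_restore() around spinlock_lock()/spinlock_unlock()) and that
 * thread_exists() (declared further below) is called with threads_lock held
 * while it searches threads_btree.
 *
 *     ipl_t ipl = interrupts_disable();
 *     spinlock_lock(&threads_lock);
 *     if (thread_exists(t)) {
 *         spinlock_lock(&t->lock);
 *         // ... examine or update fields of *t ...
 *         spinlock_unlock(&t->lock);
 *     }
 *     spinlock_unlock(&threads_lock);
 *     interrupts_restore(ipl);
 */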
extern void thread_init(void);
extern thread_t *thread_create(void (* func)(void *), void *arg, task_t *task, int flags, char *name);
extern void thread_ready(thread_t *t);
extern void thread_exit(void) __attribute__((noreturn));
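/*
 * Creation sketch: a kernel thread is started in two steps using the
 * interface above: thread_create() allocates the thread (in the Entering
 * state) and thread_ready() hands it to the scheduler. TASK is assumed here
 * to be the usual macro naming the current task; the worker/start_worker
 * names are purely illustrative.
 *
 *     static void worker(void *arg)
 *     {
 *         // ... do the work ...
 *         thread_exit();
 *     }
 *
 *     void start_worker(void)
 *     {
 *         thread_t *t = thread_create(worker, NULL, TASK, 0, "worker");
 *         if (t)
 *             thread_ready(t);
 *     }
 */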
#ifndef thread_create_arch
extern void thread_create_arch(thread_t *t);
#endif

extern void thread_sleep(__u32 sec);
extern void thread_usleep(__u32 usec);

#define thread_join(t)  thread_join_timeout((t), SYNCH_NO_TIMEOUT, SYNCH_FLAGS_NONE)
extern int thread_join_timeout(thread_t *t, __u32 usec, int flags);
extern void thread_detach(thread_t *t);
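/*
 * Join sketch: a thread that was not detached ends up in the Undead state
 * after it exits and has to be joined. The pattern assumed here is to wait
 * for the thread with the no-timeout thread_join() wrapper above and then
 * detach it so that it can be destroyed; the reap() helper is illustrative.
 *
 *     void reap(thread_t *t)
 *     {
 *         thread_join(t);     // blocks until t exits
 *         thread_detach(t);   // let the Undead thread be cleaned up
 *     }
 */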
extern void thread_register_call_me(void (* call_me)(void *), void *call_me_with);
extern void thread_print_list(void);
extern void thread_destroy(thread_t *t);
extern bool thread_exists(thread_t *t);
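/*
 * Callback sketch: per the call_me/call_me_with fields of struct thread,
 * thread_register_call_me() arranges for a function to be called by the
 * scheduler on behalf of the current thread before it is put to sleep.
 * Whether the callback persists across sleeps is not specified by this
 * header; the fragment below only shows the registration itself, and the
 * before_sleep() name is illustrative.
 *
 *     static void before_sleep(void *arg)
 *     {
 *         // ... e.g. release per-CPU state that must not be held while asleep ...
 *     }
 *
 *     thread_register_call_me(before_sleep, NULL);
 */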
/* FPU context slab cache. */
extern slab_cache_t *fpu_context_slab;

/** Thread syscall prototypes. */
__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name);
__native sys_thread_exit(int uspace_status);

#endif