Subversion Repositories HelenOS


Diff of Rev 2092 → Rev 2093 of the kernel task header (include guard KERN_TASK_H_). The only change between the two revisions is the declaration of tasks_lock near the end of the file: Rev 2092 declares it as "extern spinlock_t tasks_lock;", while Rev 2093 declares it as "SPINLOCK_EXTERN(tasks_lock);".
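The new form matches the SPINLOCK_DECLARE(lock) usage inside the structures below, so the external declaration presumably follows the same configuration switch as the other spinlock declarations. A minimal sketch of how such macros are commonly defined, assuming a HelenOS-style <synch/spinlock.h> in which spinlocks compile away on uniprocessor builds (the exact definitions are an assumption, not taken from this revision):

    /* Illustrative only; assumed to resemble <synch/spinlock.h>, not copied from it. */
    #ifdef CONFIG_SMP
    /* SMP builds: the macros expand to real spinlock declarations. */
    #define SPINLOCK_DECLARE(name)  spinlock_t name
    #define SPINLOCK_EXTERN(name)   extern spinlock_t name
    #else
    /* Uniprocessor builds: spinlocks are not needed, so the macros expand to nothing. */
    #define SPINLOCK_DECLARE(name)
    #define SPINLOCK_EXTERN(name)
    #endif

With a declaration macro of this kind, "SPINLOCK_EXTERN(tasks_lock);" stays in sync with however the lock itself is compiled, which a plain "extern spinlock_t tasks_lock;" would not. The full file as of Rev 2093 follows.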
/*
 * Copyright (c) 2001-2004 Jakub Jermar
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericproc
 * @{
 */
/** @file
 */

#ifndef KERN_TASK_H_
#define KERN_TASK_H_

#include <cpu.h>
#include <synch/spinlock.h>
#include <synch/mutex.h>
#include <synch/rwlock.h>
#include <synch/futex.h>
#include <adt/btree.h>
#include <adt/list.h>
#include <security/cap.h>
#include <arch/proc/task.h>
#include <arch/proc/thread.h>
#include <arch/context.h>
#include <arch/fpu_context.h>
#include <arch/cpu.h>
#include <mm/tlb.h>
#include <proc/scheduler.h>

#define IPC_MAX_PHONES  16
#define THREAD_NAME_BUFLEN  20

struct answerbox;
struct task;
struct thread;

typedef enum {
    IPC_PHONE_FREE = 0,     /**< Phone is free and can be allocated */
    IPC_PHONE_CONNECTING,   /**< Phone is connecting somewhere */
    IPC_PHONE_CONNECTED,    /**< Phone is connected */
    IPC_PHONE_HUNGUP,       /**< Phone is hung up, waiting for answers to come */
    IPC_PHONE_SLAMMED       /**< Phone was hung up from the server side */
} ipc_phone_state_t;

/** Structure identifying a phone (in the TASK structure) */
typedef struct {
    SPINLOCK_DECLARE(lock);
    link_t link;
    struct answerbox *callee;
    ipc_phone_state_t state;
    atomic_t active_calls;
} phone_t;

typedef struct answerbox {
    SPINLOCK_DECLARE(lock);

    struct task *task;

    waitq_t wq;

    link_t connected_phones;    /**< Phones connected to this answerbox */
    link_t calls;               /**< Received calls */
    link_t dispatched_calls;    /* Should be a hash table in the future */

    link_t answers;             /**< Answered calls */

    SPINLOCK_DECLARE(irq_lock);
    link_t irq_notifs;          /**< Notifications from IRQ handlers */
    link_t irq_head;            /**< IRQs with notifications to this answerbox. */
} answerbox_t;

/** Task structure. */
typedef struct task {
    /** Task lock.
     *
     * Must be acquired before threads_lock and the thread lock of any of its threads.
     */
    SPINLOCK_DECLARE(lock);

    char *name;
    struct thread *main_thread; /**< Pointer to the main thread. */
    link_t th_head;             /**< List of threads contained in this task. */
    as_t *as;                   /**< Address space. */
    task_id_t taskid;           /**< Unique identity of the task. */
    context_id_t context;       /**< Task security context. */

    /** If this is true, new threads can become part of the task. */
    bool accept_new_threads;

    count_t refcount;           /**< Number of references (i.e. threads). */

    cap_t capabilities;         /**< Task capabilities. */

    /* IPC stuff */
    answerbox_t answerbox;      /**< Communication endpoint. */
    phone_t phones[IPC_MAX_PHONES];
    atomic_t active_calls;      /**< Active asynchronous messages.
                                 *   Used to limit uspace to a
                                 *   certain extent. */

    task_arch_t arch;           /**< Architecture-specific task data. */

    /**
     * Serializes access to the B+tree of the task's futexes. This mutex is
     * independent of the task spinlock.
     */
    mutex_t futexes_lock;
    btree_t futexes;            /**< B+tree of futexes referenced by this task. */

    uint64_t cycles;            /**< Accumulated accounting. */
} task_t;

typedef void (* timeout_handler_t)(void *arg);

typedef struct {
    SPINLOCK_DECLARE(lock);

    link_t link;                /**< Link to the list of active timeouts on THE->cpu. */

    uint64_t ticks;             /**< Timeout will be activated in this number of clock() ticks. */

    timeout_handler_t handler;  /**< Function that will be called on timeout activation. */
    void *arg;                  /**< Argument to be passed to the handler() function. */

    cpu_t *cpu;                 /**< Processor on which this timeout is registered. */
} timeout_t;

/** Thread states. */
typedef enum {
    Invalid,    /**< It is an error if a thread is found in this state. */
    Running,    /**< State of a thread that is currently executing on some CPU. */
    Sleeping,   /**< A thread in this state is waiting for an event. */
    Ready,      /**< State of threads in a run queue. */
    Entering,   /**< Threads are in this state before they are first readied. */
    Exiting,    /**< After a thread calls thread_exit(), it is put into the Exiting state. */
    Undead      /**< Threads that were not detached but have exited are in the Undead state. */
} state_t;

/** Join types. */
typedef enum {
    None,
    TaskClnp,   /**< The thread will be joined by the ktaskclnp thread. */
    TaskGC      /**< The thread will be joined by the ktaskgc thread. */
} thread_join_type_t;

/** Thread structure. There is one per thread. */
typedef struct thread {
    link_t rq_link;             /**< Run queue link. */
    link_t wq_link;             /**< Wait queue link. */
    link_t th_link;             /**< Links to threads within the containing task. */

    /** Lock protecting the thread structure.
     *
     * Protects the whole thread structure except the list links above.
     */
    SPINLOCK_DECLARE(lock);

    char name[THREAD_NAME_BUFLEN];

    void (* thread_code)(void *);   /**< Function implementing the thread. */
    void *thread_arg;               /**< Argument passed to the thread_code() function. */

    /** From here, the stored context is restored when the thread is scheduled. */
    context_t saved_context;
    /** From here, the stored timeout context is restored when a sleep times out. */
    context_t sleep_timeout_context;
    /** From here, the stored interruption context is restored when a sleep is interrupted. */
    context_t sleep_interruption_context;

    bool sleep_interruptible;       /**< If true, the thread can be interrupted from sleep. */
    waitq_t *sleep_queue;           /**< Wait queue in which this thread sleeps. */
    timeout_t sleep_timeout;        /**< Timeout used for timeoutable sleeping. */
    volatile int timeout_pending;   /**< Flag signalling sleep timeout in progress. */

    /** True if this thread is executing copy_from_uspace(). False otherwise. */
    bool in_copy_from_uspace;
    /** True if this thread is executing copy_to_uspace(). False otherwise. */
    bool in_copy_to_uspace;

    /**
     * If true, the thread will not go to sleep at all and will
     * call thread_exit() before returning to userspace.
     */
    bool interrupted;

    thread_join_type_t join_type;   /**< Who joins the thread. */
    bool detached;              /**< If true, thread_join_timeout() cannot be used on this thread. */
    waitq_t join_wq;            /**< Waitq for thread_join_timeout(). */

    fpu_context_t *saved_fpu_context;
    int fpu_context_exists;

    /*
     * Defined only when the thread is not running.
     * It means that the FPU context is in the CPU that last executed this thread.
     * This disables migration.
     */
    int fpu_context_engaged;

    rwlock_type_t rwlock_holder_type;

    void (* call_me)(void *);   /**< Function to be called in the scheduler before the thread is put to sleep. */
    void *call_me_with;         /**< Argument passed to call_me(). */

    state_t state;              /**< Thread's state. */
    int flags;                  /**< Thread's flags. */

    cpu_t *cpu;                 /**< Thread's CPU. */
    task_t *task;               /**< Containing task. */

    uint64_t ticks;             /**< Ticks before preemption. */

    uint64_t cycles;            /**< Thread accounting. */
    uint64_t last_cycle;        /**< Last sampled cycle. */
    bool uncounted;             /**< Thread does not affect accumulated accounting. */

    int priority;               /**< Thread's priority. Implemented as an index into CPU->rq. */
    uint32_t tid;               /**< Thread ID. */

    thread_arch_t arch;         /**< Architecture-specific data. */

    uint8_t *kstack;            /**< Thread's kernel stack. */
} thread_t;

SPINLOCK_EXTERN(tasks_lock);
extern btree_t tasks_btree;

extern void task_init(void);
extern task_t *task_create(as_t *as, char *name);
extern void task_destroy(task_t *t);
extern task_t *task_run_program(void *program_addr, char *name);
extern task_t *task_find_by_id(task_id_t id);
extern int task_kill(task_id_t id);
extern uint64_t task_get_accounting(task_t *t);

extern void cap_set(task_t *t, cap_t caps);
extern cap_t cap_get(task_t *t);

#ifndef task_create_arch
extern void task_create_arch(task_t *t);
#endif

#ifndef task_destroy_arch
extern void task_destroy_arch(task_t *t);
#endif

extern unative_t sys_task_get_id(task_id_t *uspace_task_id);

#endif

/** @}
 */