Diff of the kernel task header (KERN_TASK_H_) between Rev 2071 and Rev 2089.
Lines 1-6, unchanged in both revisions:

```diff
 /*
  * Copyright (c) 2001-2004 Jakub Jermar
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
```
Lines 33-46 (Rev 2071) / 33-93 (Rev 2089), where the include list changes and the IPC phone and answerbox declarations are added:

```diff
  */
 
 #ifndef KERN_TASK_H_
 #define KERN_TASK_H_
 
-#include <typedefs.h>
 #include <synch/spinlock.h>
 #include <synch/mutex.h>
+#include <synch/rwlock.h>
 #include <synch/futex.h>
 #include <adt/btree.h>
 #include <adt/list.h>
-#include <ipc/ipc.h>
 #include <security/cap.h>
 #include <arch/proc/task.h>
+#include <arch/proc/thread.h>
+#include <arch/context.h>
+#include <arch/fpu_context.h>
+#include <arch/cpu.h>
+#include <mm/tlb.h>
+#include <proc/scheduler.h>
+
+#define IPC_MAX_PHONES      16
+#define THREAD_NAME_BUFLEN  20
+
+struct answerbox;
+struct task;
+struct thread;
+
+typedef enum {
+    IPC_PHONE_FREE = 0,     /**< Phone is free and can be allocated */
+    IPC_PHONE_CONNECTING,   /**< Phone is connecting somewhere */
+    IPC_PHONE_CONNECTED,    /**< Phone is connected */
+    IPC_PHONE_HUNGUP,       /**< Phone is hung up, waiting for answers to come */
+    IPC_PHONE_SLAMMED       /**< Phone was hung up from the server */
+} ipc_phone_state_t;
+
+/** Structure identifying a phone (in the TASK structure) */
+typedef struct {
+    SPINLOCK_DECLARE(lock);
+    link_t link;
+    struct answerbox *callee;
+    ipc_phone_state_t state;
+    atomic_t active_calls;
+} phone_t;
+
+typedef struct answerbox {
+    SPINLOCK_DECLARE(lock);
+
+    struct task *task;
+
+    waitq_t wq;
+
+    link_t connected_phones;    /**< Phones connected to this answerbox */
+    link_t calls;               /**< Received calls */
+    link_t dispatched_calls;    /* Should be a hash table in the future */
+
+    link_t answers;             /**< Answered calls */
+
+    SPINLOCK_DECLARE(irq_lock);
+    link_t irq_notifs;          /**< Notifications from IRQ handlers */
+    link_t irq_head;            /**< IRQs with notifications to this answerbox. */
+} answerbox_t;
```
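The relationship between phone_t and answerbox_t above is easier to see in code than in the field list. The following is a minimal sketch, not the kernel's actual IPC implementation: phone_init() and phone_connect() are hypothetical helper names, <proc/task.h> is assumed to be the include path of this header, and the spinlock, list, and atomic primitives are assumed to behave like the usual HelenOS <synch/spinlock.h> and <adt/list.h> ones.

```c
#include <synch/spinlock.h>
#include <adt/list.h>
#include <proc/task.h>          /* assumed include path of this header */

/* Hypothetical helper: put a phone into its initial, unconnected state. */
static void phone_init(phone_t *phone)
{
    spinlock_initialize(&phone->lock, "phone_lock");
    link_initialize(&phone->link);
    phone->callee = NULL;
    phone->state = IPC_PHONE_FREE;
    atomic_set(&phone->active_calls, 0);
}

/* Hypothetical helper: connect an allocated phone to a callee answerbox.
 * Real kernel code would also disable interrupts around these spinlocks. */
static void phone_connect(phone_t *phone, answerbox_t *box)
{
    spinlock_lock(&phone->lock);
    phone->state = IPC_PHONE_CONNECTED;
    phone->callee = box;
    spinlock_unlock(&phone->lock);

    spinlock_lock(&box->lock);
    list_append(&phone->link, &box->connected_phones);
    spinlock_unlock(&box->lock);
}
```

A hung-up phone would move to IPC_PHONE_HUNGUP on the caller side or IPC_PHONE_SLAMMED when the server side drops it, which is exactly the distinction the enum encodes.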
Lines 47-62 (Rev 2071) / 94-109 (Rev 2089), the beginning of the task structure:

```diff
 
 /** Task structure. */
-struct task {
+typedef struct task {
     /** Task lock.
      *
      * Must be acquired before threads_lock and the thread lock of any of its threads.
      */
     SPINLOCK_DECLARE(lock);
 
     char *name;
-    thread_t *main_thread;      /**< Pointer to the main thread. */
+    struct thread *main_thread; /**< Pointer to the main thread. */
     link_t th_head;             /**< List of threads contained in this task. */
     as_t *as;                   /**< Address space. */
     task_id_t taskid;           /**< Unique identity of the task */
     context_id_t context;       /**< Task security context */
 
```
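The lock-ordering rule in the comment above (task lock before threads_lock and before any thread lock) is easy to violate, so a minimal sketch of the intended discipline may help. It assumes the usual HelenOS pattern of disabling interrupts before taking kernel spinlocks; rename_task() is a hypothetical function, and <arch/interrupt.h> is assumed to be where ipl_t and interrupts_disable()/interrupts_restore() come from.

```c
#include <arch/interrupt.h>     /* assumed home of ipl_t and interrupts_disable() */
#include <synch/spinlock.h>
#include <proc/task.h>          /* assumed include path of this header */

/* Hypothetical: update a task's name while honouring the documented lock order. */
static void rename_task(task_t *t, char *new_name)
{
    ipl_t ipl;

    /* Kernel spinlocks are taken with interrupts disabled. */
    ipl = interrupts_disable();

    /* The task lock is the outermost lock in this hierarchy: it must be
     * held before threads_lock or the lock of any of the task's threads. */
    spinlock_lock(&t->lock);
    t->name = new_name;
    spinlock_unlock(&t->lock);

    interrupts_restore(ipl);
}
```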
Lines 82-87 (Rev 2071) / 129-179 (Rev 2089): the task structure is closed as task_t and a per-processor cpu_t structure is added (the additions continue in the next two blocks):

```diff
      */
     mutex_t futexes_lock;
     btree_t futexes;            /**< B+tree of futexes referenced by this task. */
 
     uint64_t cycles;            /**< Accumulated accounting. */
-};
+} task_t;
+
+/** CPU structure.
+ *
+ * There is one structure like this for every processor.
+ */
+typedef struct {
+    SPINLOCK_DECLARE(lock);
+
+    tlb_shootdown_msg_t tlb_messages[TLB_MESSAGE_QUEUE_LEN];
+    count_t tlb_messages_count;
+
+    context_t saved_context;
+
+    atomic_t nrdy;
+    runq_t rq[RQ_COUNT];
+    volatile count_t needs_relink;
+
+    SPINLOCK_DECLARE(timeoutlock);
+    link_t timeout_active_head;
+
+    count_t missed_clock_ticks; /**< When the system clock loses a tick, it is recorded here
+                                     so that clock() can react. This variable is
+                                     CPU-local and can only be accessed when interrupts
+                                     are disabled. */
+
+    /**
+     * Processor ID assigned by the kernel.
+     */
+    int id;
+
+    int active;
+    int tlb_active;
+
+    uint16_t frequency_mhz;
+    uint32_t delay_loop_const;
+
+    cpu_arch_t arch;
+
+    struct thread *fpu_owner;
+
+    /**
+     * Stack used by the scheduler when there is no running thread.
+     */
+    uint8_t *stack;
+} cpu_t;
```
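Note that the futexes B+tree inside task_t is protected by a mutex (futexes_lock) rather than a spinlock, so it may only be touched from a context that is allowed to sleep. A minimal sketch of that discipline follows; lookup_task_futex() and find_futex_in_btree() are hypothetical names used purely for illustration, and <proc/task.h> is again assumed to be the include path of this header.

```c
#include <synch/mutex.h>
#include <adt/btree.h>
#include <arch/types.h>         /* assumed home of uintptr_t */
#include <proc/task.h>          /* assumed include path of this header */

/* Hypothetical search routine; the real B+tree lookup is left abstract
 * because only the locking discipline matters here. */
extern void *find_futex_in_btree(btree_t *futexes, uintptr_t paddr);

/* Hypothetical: look up a futex belonging to a task. */
static void *lookup_task_futex(task_t *t, uintptr_t paddr)
{
    void *futex;

    /* futexes_lock is a mutex, so the caller may block here; this must
     * therefore never be called from interrupt context. */
    mutex_lock(&t->futexes_lock);
    futex = find_futex_in_btree(&t->futexes, paddr);
    mutex_unlock(&t->futexes_lock);

    return futex;
}
```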
Rev 2089 lines 180-194, still part of the same addition: the timeout callback type and timeout_t:

```diff
+
+typedef void (* timeout_handler_t)(void *arg);
+
+typedef struct {
+    SPINLOCK_DECLARE(lock);
+
+    link_t link;                /**< Link to the list of active timeouts on THE->cpu */
+
+    uint64_t ticks;             /**< Timeout will be activated in this number of clock() ticks. */
+
+    timeout_handler_t handler;  /**< Function that will be called on timeout activation. */
+    void *arg;                  /**< Argument to be passed to the handler() function. */
+
+    cpu_t *cpu;                 /**< Processor on which this timeout is registered. */
+} timeout_t;
```
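timeout_handler_t above fixes the shape of callbacks fired from clock(): a handler receives only the opaque arg stored in the timeout_t. The sketch below shows such a handler and the fields a registration has to fill in; expiry_handler() and arm_example_timeout() are hypothetical, and the real registration call in the time subsystem (which also links the timeout into cpu->timeout_active_head under cpu->timeoutlock) is deliberately not spelled out here because its signature is not part of this header.

```c
#include <arch/types.h>         /* assumed home of uint64_t */
#include <proc/task.h>          /* assumed include path of this header */

/* Hypothetical handler: runs from clock() context, so it must be quick
 * and must not block; here it merely raises a flag. */
static void expiry_handler(void *arg)
{
    volatile int *expired = (volatile int *) arg;

    *expired = 1;
}

/* Hypothetical setup showing which timeout_t fields matter to a handler. */
static void arm_example_timeout(timeout_t *t, uint64_t ticks, volatile int *flag)
{
    t->ticks = ticks;           /* fire after this many clock() ticks */
    t->handler = expiry_handler;
    t->arg = (void *) flag;
}
```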
Rev 2089 lines 195-291, the rest of the addition: thread states, join types, and the thread structure:

```diff
+
+/** Thread states. */
+typedef enum {
+    Invalid,    /**< It is an error if a thread is found in this state. */
+    Running,    /**< State of a thread that is currently executing on some CPU. */
+    Sleeping,   /**< A thread in this state is waiting for an event. */
+    Ready,      /**< State of threads in a run queue. */
+    Entering,   /**< Threads are in this state before they are first readied. */
+    Exiting,    /**< After a thread calls thread_exit(), it is put into the Exiting state. */
+    Undead      /**< Threads that were not detached but exited are in the Undead state. */
+} state_t;
+
+/** Join types. */
+typedef enum {
+    None,
+    TaskClnp,   /**< The thread will be joined by the ktaskclnp thread. */
+    TaskGC      /**< The thread will be joined by the ktaskgc thread. */
+} thread_join_type_t;
+
+/** Thread structure. There is one per thread. */
+typedef struct thread {
+    link_t rq_link;             /**< Run queue link. */
+    link_t wq_link;             /**< Wait queue link. */
+    link_t th_link;             /**< Links to threads within the containing task. */
+
+    /** Lock protecting the thread structure.
+     *
+     * Protects the whole thread structure except the list links above.
+     */
+    SPINLOCK_DECLARE(lock);
+
+    char name[THREAD_NAME_BUFLEN];
+
+    void (* thread_code)(void *);   /**< Function implementing the thread. */
+    void *thread_arg;               /**< Argument passed to the thread_code() function. */
+
+    /** From here, the stored context is restored when the thread is scheduled. */
+    context_t saved_context;
+    /** From here, the stored timeout context is restored when a sleep times out. */
+    context_t sleep_timeout_context;
+    /** From here, the stored interruption context is restored when a sleep is interrupted. */
+    context_t sleep_interruption_context;
+
+    bool sleep_interruptible;       /**< If true, the thread can be interrupted from sleep. */
+    waitq_t *sleep_queue;           /**< Wait queue in which this thread sleeps. */
+    timeout_t sleep_timeout;        /**< Timeout used for timeoutable sleeping. */
+    volatile int timeout_pending;   /**< Flag signalling a sleep timeout in progress. */
+
+    /** True if this thread is executing copy_from_uspace(). False otherwise. */
+    bool in_copy_from_uspace;
+    /** True if this thread is executing copy_to_uspace(). False otherwise. */
+    bool in_copy_to_uspace;
+
+    /**
+     * If true, the thread will not go to sleep at all and will
+     * call thread_exit() before returning to userspace.
+     */
+    bool interrupted;
+
+    thread_join_type_t join_type;   /**< Who joins the thread. */
+    bool detached;                  /**< If true, thread_join_timeout() cannot be used on this thread. */
+    waitq_t join_wq;                /**< Wait queue for thread_join_timeout(). */
+
+    fpu_context_t *saved_fpu_context;
+    int fpu_context_exists;
+
+    /*
+     * Defined only if the thread is not running.
+     * It means that the FPU context is in the CPU that last executed this thread.
+     * This disables migration.
+     */
+    int fpu_context_engaged;
+
+    rwlock_type_t rwlock_holder_type;
+
+    void (* call_me)(void *);   /**< Function to be called by the scheduler before the thread is put to sleep. */
+    void *call_me_with;         /**< Argument passed to call_me(). */
+
+    state_t state;              /**< Thread's state. */
+    int flags;                  /**< Thread's flags. */
+
+    cpu_t *cpu;                 /**< Thread's CPU. */
+    task_t *task;               /**< Containing task. */
+
+    uint64_t ticks;             /**< Ticks before preemption. */
+
+    uint64_t cycles;            /**< Thread accounting. */
+    uint64_t last_cycle;        /**< Last sampled cycle. */
+    bool uncounted;             /**< Thread does not affect accumulated accounting. */
+
+    int priority;               /**< Thread's priority. Implemented as an index into CPU->rq. */
+    uint32_t tid;               /**< Thread ID. */
+
+    thread_arch_t arch;         /**< Architecture-specific data. */
+
+    uint8_t *kstack;            /**< Thread's kernel stack. */
+} thread_t;
```
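With task_t and thread_t now visible in the same header, the accounting fields (task_t.cycles plus the cycles of each thread on th_head) can be illustrated with a short list walk. This is a sketch in the spirit of task_get_accounting(), not its actual implementation; it assumes the list_get_instance() helper from <adt/list.h>, and <arch/interrupt.h> is again assumed to provide ipl_t and the interrupt primitives.

```c
#include <adt/list.h>
#include <synch/spinlock.h>
#include <arch/interrupt.h>     /* assumed home of ipl_t and interrupts_disable() */
#include <proc/task.h>          /* assumed include path of this header */

/* Sketch: sum the cycles charged to a task and its threads, honouring the
 * documented lock order (task lock first, then each thread's lock). */
static uint64_t sum_task_cycles(task_t *t)
{
    uint64_t total;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&t->lock);

    total = t->cycles;          /* cycles already accumulated by the task */

    for (cur = t->th_head.next; cur != &t->th_head; cur = cur->next) {
        thread_t *thr = list_get_instance(cur, thread_t, th_link);

        spinlock_lock(&thr->lock);
        if (!thr->uncounted)
            total += thr->cycles;
        spinlock_unlock(&thr->lock);
    }

    spinlock_unlock(&t->lock);
    interrupts_restore(ipl);

    return total;
}
```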
Lines 88-92 (Rev 2071) / 292-296 (Rev 2089), unchanged context:

```diff
 
 extern spinlock_t tasks_lock;
 extern btree_t tasks_btree;
 
 extern void task_init(void);
```

Lines 95-104 (Rev 2071) / 299-311 (Rev 2089), where the capability accessors are added:

```diff
 extern task_t *task_run_program(void *program_addr, char *name);
 extern task_t *task_find_by_id(task_id_t id);
 extern int task_kill(task_id_t id);
 extern uint64_t task_get_accounting(task_t *t);
 
+extern void cap_set(task_t *t, cap_t caps);
+extern cap_t cap_get(task_t *t);
+
 
 #ifndef task_create_arch
 extern void task_create_arch(task_t *t);
 #endif
 
```
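Finally, the new cap_set()/cap_get() declarations pair naturally with task_find_by_id(). The sketch below shows one plausible call sequence; it assumes, as the shared tasks_lock and tasks_btree declarations suggest, that lookups are done with tasks_lock held and interrupts disabled, that cap_t is a plain capability bitmask, and that cap_set()/cap_get() handle the task's own lock internally. grant_caps() is a hypothetical function, not part of the kernel API.

```c
#include <synch/spinlock.h>
#include <security/cap.h>
#include <arch/interrupt.h>     /* assumed home of ipl_t and interrupts_disable() */
#include <proc/task.h>          /* assumed include path of this header */

/* Hypothetical: grant additional capability bits to a task given its ID. */
static int grant_caps(task_id_t id, cap_t granted)
{
    task_t *t;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&tasks_lock); /* the shared task B+tree is guarded by tasks_lock */

    t = task_find_by_id(id);
    if (!t) {
        spinlock_unlock(&tasks_lock);
        interrupts_restore(ipl);
        return -1;              /* no such task; a real caller would return a proper error code */
    }

    /* cap_set()/cap_get() are assumed to take the task's own lock internally. */
    cap_set(t, cap_get(t) | granted);

    spinlock_unlock(&tasks_lock);
    interrupts_restore(ipl);
    return 0;
}
```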