| Line (Rev 3424) | Rev 3424 | Line (Rev 3425) | Rev 3425 |
|---|---|---|---|
| Line 33... | | Line 33... | |
| 33 | `/**` | 33 | `/**` |
| 34 | ` * @file` | 34 | ` * @file` |
| 35 | ` * @brief Task management.` | 35 | ` * @brief Task management.` |
| 36 | ` */` | 36 | ` */` |
| 37 | | 37 | |
| 38 | `#include <main/uinit.h>` | - | |
| 39 | `#include <proc/thread.h>` | 38 | `#include <proc/thread.h>` |
| 40 | `#include <proc/task.h>` | 39 | `#include <proc/task.h>` |
| 41 | `#include <proc/uarg.h>` | - | |
| 42 | `#include <mm/as.h>` | 40 | `#include <mm/as.h>` |
| 43 | `#include <mm/slab.h>` | 41 | `#include <mm/slab.h>` |
| 44 | `#include <atomic.h>` | 42 | `#include <atomic.h>` |
| 45 | `#include <synch/spinlock.h>` | 43 | `#include <synch/spinlock.h>` |
| 46 | `#include <synch/waitq.h>` | 44 | `#include <synch/waitq.h>` |
| 47 | `#include <arch.h>` | 45 | `#include <arch.h>` |
| 48 | `#include <arch/barrier.h>` | 46 | `#include <arch/barrier.h>` |
| 49 | `#include <panic.h>` | - | |
| 50 | `#include <adt/avl.h>` | 47 | `#include <adt/avl.h>` |
| 51 | `#include <adt/btree.h>` | 48 | `#include <adt/btree.h>` |
| 52 | `#include <adt/list.h>` | 49 | `#include <adt/list.h>` |
| 53 | `#include <ipc/ipc.h>` | 50 | `#include <ipc/ipc.h>` |
| 54 | `#include <security/cap.h>` | 51 | `#include <ipc/ipcrsc.h>` |
| 55 | `#include <memstr.h>` | - | |
| 56 | `#include <print.h>` | 52 | `#include <print.h>` |
| 57 | `#include <lib/elf.h>` | - | |
| 58 | `#include <errno.h>` | 53 | `#include <errno.h>` |
| 59 | `#include <func.h>` | 54 | `#include <func.h>` |
| 60 | `#include <syscall/copy.h>` | 55 | `#include <syscall/copy.h>` |
| 61 | | 56 | |
| 62 | `#ifndef LOADED_PROG_STACK_PAGES_NO` | - | |
| 63 | `#define LOADED_PROG_STACK_PAGES_NO 1` | - | |
| 64 | `#endif` | - | |
| 65 | | - | |
| 66 | `/** Spinlock protecting the tasks_tree AVL tree. */` | 57 | `/** Spinlock protecting the tasks_tree AVL tree. */` |
| 67 | `SPINLOCK_INITIALIZE(tasks_lock);` | 58 | `SPINLOCK_INITIALIZE(tasks_lock);` |
| 68 | | 59 | |
| 69 | `/** AVL tree of active tasks.` | 60 | `/** AVL tree of active tasks.` |
| 70 | ` *` | 61 | ` *` |
| Line 78... | | Line 69... | |
| 78 | ` */` | 69 | ` */` |
| 79 | `avltree_t tasks_tree;` | 70 | `avltree_t tasks_tree;` |
| 80 | | 71 | |
| 81 | `static task_id_t task_counter = 0;` | 72 | `static task_id_t task_counter = 0;` |
| 82 | | 73 | |
| 83 | `/** Initialize tasks` | - | |
| 84 | ` *` | - | |
| 85 | ` * Initialize kernel tasks support.` | 74 | `/** Initialize kernel tasks support. */` |
| 86 | ` *` | - | |
| 87 | ` */` | - | |
| 88 | `void task_init(void)` | 75 | `void task_init(void)` |
| 89 | `{` | 76 | `{` |
| 90 | `TASK = NULL;` | 77 | `TASK = NULL;` |
| 91 | `avltree_create(&tasks_tree);` | 78 | `avltree_create(&tasks_tree);` |
| 92 | `}` | 79 | `}` |
| 93 | | 80 | |
| 94 | `/*` | 81 | `/*` |
| 95 | ` * The idea behind this walker is to remember a single task different from TASK.` | 82 | ` * The idea behind this walker is to remember a single task different from` |
| - | | 83 | ` * TASK.` |
| 96 | ` */` | 84 | ` */` |
| 97 | `static bool task_done_walker(avltree_node_t *node, void *arg)` | 85 | `static bool task_done_walker(avltree_node_t *node, void *arg)` |
| 98 | `{` | 86 | `{` |
| 99 | `task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);` | 87 | `task_t *t = avltree_get_instance(node, task_t, tasks_tree_node);` |
| 100 | `task_t **tp = (task_t **) arg;` | 88 | `task_t **tp = (task_t **) arg;` |
| Line 105... | | Line 93... | |
| 105 | `}` | 93 | `}` |
| 106 | | 94 | |
| 107 | `return true; /* continue the walk */` | 95 | `return true; /* continue the walk */` |
| 108 | `}` | 96 | `}` |
| 109 | | 97 | |
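task_done_walker() above follows the usual tree-walk callback convention: it remembers the first task it visits that is not TASK in the caller-supplied pointer and returns true so the walk continues. The body of task_done() is elided by the diff hunking and is not reproduced here; the following is only a minimal sketch of how such a walker is typically driven, and the avltree_walk() helper and interrupts_restore() call are assumptions based on the surrounding code:

```c
/* Sketch only: assumes avltree_walk(&tree, walker, arg) exists and that
 * interrupts_restore() is the counterpart of interrupts_disable(). */
task_t *t = NULL;

ipl_t ipl = interrupts_disable();
spinlock_lock(&tasks_lock);          /* tasks_tree is protected by tasks_lock */

/* Visit every node; the walker records one task different from TASK in t. */
avltree_walk(&tasks_tree, task_done_walker, (void *) &t);

spinlock_unlock(&tasks_lock);
interrupts_restore(ipl);

if (t != NULL) {
	/* Ask the remaining task to terminate, e.g. via task_kill(t->taskid). */
}
```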
| Line (Rev 3424) | Rev 3424 | Line (Rev 3425) | Rev 3425 |
|---|---|---|---|
| 110 | `/** Kill all tasks except the current task.` | 98 | `/** Kill all tasks except the current task. */` |
| 111 | ` *` | - | |
| 112 | ` */` | - | |
| 113 | `void task_done(void)` | 99 | `void task_done(void)` |
| 114 | `{` | 100 | `{` |
| 115 | `task_t *t;` | 101 | `task_t *t;` |
| 116 | `do { /* Repeat until there are any tasks except TASK */` | 102 | `do { /* Repeat until there are any tasks except TASK */` |
| 117 | | 103 | |
| Line 139... | | Line 125... | |
| 139 | `}` | 125 | `}` |
| 140 | | 126 | |
| 141 | `} while (t != NULL);` | 127 | `} while (t != NULL);` |
| 142 | `}` | 128 | `}` |
| 143 | | 129 | |
| 144 | `/** Create new task` | - | |
| 145 | ` *` | - | |
| 146 | ` * Create new task with no threads.` | 130 | `/** Create new task with no threads.` |
| 147 | ` *` | 131 | ` *` |
| 148 | ` * @param as Task's address space.` | 132 | ` * @param as Task's address space.` |
| 149 | ` * @param name Symbolic name.` | 133 | ` * @param name Symbolic name.` |
| 150 | ` *` | 134 | ` *` |
| 151 | ` * @return New task's structure` | 135 | ` * @return New task's structure.` |
| 152 | ` *` | 136 | ` *` |
| 153 | ` */` | 137 | ` */` |
| 154 | `task_t *task_create(as_t *as, char *name)` | 138 | `task_t *task_create(as_t *as, char *name)` |
| 155 | `{` | 139 | `{` |
| 156 | `ipl_t ipl;` | 140 | `ipl_t ipl;` |
| Line 187... | | Line 171... | |
| 187 | `if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,` | 171 | `if ((ipc_phone_0) && (context_check(ipc_phone_0->task->context,` |
| 188 | `ta->context)))` | 172 | `ta->context)))` |
| 189 | `ipc_phone_connect(&ta->phones[0], ipc_phone_0);` | 173 | `ipc_phone_connect(&ta->phones[0], ipc_phone_0);` |
| 190 | `atomic_set(&ta->active_calls, 0);` | 174 | `atomic_set(&ta->active_calls, 0);` |
| 191 | | 175 | |
| 192 | `mutex_initialize(&ta->futexes_lock);` | 176 | `mutex_initialize(&ta->futexes_lock, MUTEX_PASSIVE);` |
| 193 | `btree_create(&ta->futexes);` | 177 | `btree_create(&ta->futexes);` |
| 194 | | 178 | |
| 195 | `ipl = interrupts_disable();` | 179 | `ipl = interrupts_disable();` |
| 196 | | 180 | |
| 197 | `/*` | 181 | `/*` |
| Line 210... | | Line 194... | |
| 210 | `return ta;` | 194 | `return ta;` |
| 211 | `}` | 195 | `}` |
| 212 | | 196 | |
| 213 | `/** Destroy task.` | 197 | `/** Destroy task.` |
| 214 | ` *` | 198 | ` *` |
| 215 | ` * @param t Task to be destroyed.` | 199 | ` * @param t Task to be destroyed.` |
| 216 | ` */` | 200 | ` */` |
| 217 | `void task_destroy(task_t *t)` | 201 | `void task_destroy(task_t *t)` |
| 218 | `{` | 202 | `{` |
| 219 | `/*` | 203 | `/*` |
| 220 | ` * Remove the task from the task B+tree.` | 204 | ` * Remove the task from the task B+tree.` |
| Line 243... | | Line 227... | |
| 243 | `TASK = NULL;` | 227 | `TASK = NULL;` |
| 244 | `}` | 228 | `}` |
| 245 | | 229 | |
| 246 | `/** Syscall for reading task ID from userspace.` | 230 | `/** Syscall for reading task ID from userspace.` |
| 247 | ` *` | 231 | ` *` |
| 248 | ` * @param uspace_task_id Userspace address of 8-byte buffer where to store` | 232 | ` * @param uspace_task_id userspace address of 8-byte buffer` |
| 249 | ` * current task ID.` | 233 | ` * where to store current task ID.` |
| 250 | ` *` | 234 | ` *` |
| 251 | ` * @return 0 on success or an error code from @ref errno.h.` | 235 | ` * @return Zero on success or an error code from @ref errno.h.` |
| 252 | ` */` | 236 | ` */` |
| 253 | `unative_t sys_task_get_id(task_id_t *uspace_task_id)` | 237 | `unative_t sys_task_get_id(task_id_t *uspace_task_id)` |
| 254 | `{` | 238 | `{` |
| 255 | `/*` | 239 | `/*` |
| 256 | ` * No need to acquire lock on TASK because taskid` | 240 | ` * No need to acquire lock on TASK because taskid remains constant for` |
| 257 | ` * remains constant for the lifespan of the task.` | 241 | ` * the lifespan of the task.` |
| 258 | ` */` | 242 | ` */` |
| 259 | `return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,` | 243 | `return (unative_t) copy_to_uspace(uspace_task_id, &TASK->taskid,` |
| 260 | `sizeof(TASK->taskid));` | 244 | `sizeof(TASK->taskid));` |
| 261 | `}` | 245 | `}` |
| 262 | | 246 | |
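sys_task_get_id() hands the 64-bit ID back through a user-supplied 8-byte buffer instead of the return value, presumably because unative_t is only the native word size and cannot hold a task_id_t on 32-bit targets. A hedged sketch of a userspace caller; the __syscall_task_get_id() stub is hypothetical and stands in for whatever syscall wrapper the C library actually provides:

```c
#include <stdint.h>

typedef uint64_t task_id_t;                       /* matches the 8-byte buffer */

/* Hypothetical syscall stub; returns 0 on success or an errno.h code. */
extern int __syscall_task_get_id(task_id_t *buf);

task_id_t my_task_id(void)
{
	task_id_t id = 0;

	/* The kernel copy_to_uspace()s TASK->taskid into the buffer we pass. */
	if (__syscall_task_get_id(&id) != 0)
		return 0;                         /* treat failure as "unknown" */

	return id;
}
```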
| Line (Rev 3424) | Rev 3424 | Line (Rev 3425) | Rev 3425 |
|---|---|---|---|
| 263 | `unative_t sys_task_spawn(void *image, size_t size)` | - | |
| 264 | `{` | - | |
| 265 | `void *kimage = malloc(size, 0);` | - | |
| 266 | `if (kimage == NULL)` | - | |
| 267 | `return ENOMEM;` | - | |
| 268 | | - | |
| 269 | `int rc = copy_from_uspace(kimage, image, size);` | - | |
| 270 | `if (rc != EOK)` | - | |
| 271 | `return rc;` | - | |
| 272 | | - | |
| 273 | `/*` | - | |
| 274 | ` * Not very efficient and it would be better to call it on code only,` | - | |
| 275 | ` * but this whole function is a temporary hack anyway and one day it` | - | |
| 276 | ` * will go in favor of the userspace dynamic loader.` | - | |
| 277 | ` */` | - | |
| 278 | `smc_coherence_block(kimage, size);` | - | |
| 279 | | - | |
| 280 | `uspace_arg_t *kernel_uarg;` | - | |
| 281 | `kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);` | - | |
| 282 | `if (kernel_uarg == NULL) {` | - | |
| 283 | `free(kimage);` | - | |
| 284 | `return ENOMEM;` | - | |
| 285 | `}` | - | |
| 286 | | - | |
| 287 | `kernel_uarg->uspace_entry =` | - | |
| 288 | `(void *) ((elf_header_t *) kimage)->e_entry;` | - | |
| 289 | `kernel_uarg->uspace_stack = (void *) USTACK_ADDRESS;` | - | |
| 290 | `kernel_uarg->uspace_thread_function = NULL;` | - | |
| 291 | `kernel_uarg->uspace_thread_arg = NULL;` | - | |
| 292 | `kernel_uarg->uspace_uarg = NULL;` | - | |
| 293 | | - | |
| 294 | `as_t *as = as_create(0);` | - | |
| 295 | `if (as == NULL) {` | - | |
| 296 | `free(kernel_uarg);` | - | |
| 297 | `free(kimage);` | - | |
| 298 | `return ENOMEM;` | - | |
| 299 | `}` | - | |
| 300 | | - | |
| 301 | `unsigned int erc = elf_load((elf_header_t *) kimage, as);` | - | |
| 302 | `if (erc != EE_OK) {` | - | |
| 303 | `as_destroy(as);` | - | |
| 304 | `free(kernel_uarg);` | - | |
| 305 | `free(kimage);` | - | |
| 306 | `return ENOENT;` | - | |
| 307 | `}` | - | |
| 308 | | - | |
| 309 | `as_area_t *area = as_area_create(as,` | - | |
| 310 | `AS_AREA_READ \| AS_AREA_WRITE \| AS_AREA_CACHEABLE,` | - | |
| 311 | `LOADED_PROG_STACK_PAGES_NO * PAGE_SIZE, USTACK_ADDRESS,` | - | |
| 312 | `AS_AREA_ATTR_NONE, &anon_backend, NULL);` | - | |
| 313 | `if (area == NULL) {` | - | |
| 314 | `as_destroy(as);` | - | |
| 315 | `free(kernel_uarg);` | - | |
| 316 | `free(kimage);` | - | |
| 317 | `return ENOMEM;` | - | |
| 318 | `}` | - | |
| 319 | | - | |
| 320 | `task_t *task = task_create(as, "app");` | - | |
| 321 | `if (task == NULL) {` | - | |
| 322 | `as_destroy(as);` | - | |
| 323 | `free(kernel_uarg);` | - | |
| 324 | `free(kimage);` | - | |
| 325 | `return ENOENT;` | - | |
| 326 | `}` | - | |
| 327 | | - | |
| 328 | `// FIXME: control the capabilities` | - | |
| 329 | `cap_set(task, cap_get(TASK));` | - | |
| 330 | | - | |
| 331 | `thread_t *thread = thread_create(uinit, kernel_uarg, task,` | - | |
| 332 | `THREAD_FLAG_USPACE, "user", false);` | - | |
| 333 | `if (thread == NULL) {` | - | |
| 334 | `task_destroy(task);` | - | |
| 335 | `as_destroy(as);` | - | |
| 336 | `free(kernel_uarg);` | - | |
| 337 | `free(kimage);` | - | |
| 338 | `return ENOENT;` | - | |
| 339 | `}` | - | |
| 340 | | - | |
| 341 | `thread_ready(thread);` | - | |
| 342 | | - | |
| 343 | `return EOK;` | - | |
| 344 | `}` | - | |
| 345 | | - | |
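The removed sys_task_spawn() above unwinds partially built state by hand on every error path, repeating the accumulated free()/as_destroy() calls each time (and, as written, it even leaked kimage when copy_from_uspace() failed). A goto-based cleanup ladder is the usual C idiom for keeping such paths consistent; the sketch below merely illustrates that idiom with the same calls the removed code used and is not the loader-based replacement its comment anticipates:

```c
/* Sketch of a cleanup ladder over the removed function's resources
 * (kimage, kernel_uarg, as); the later stack/task/thread steps are elided. */
unative_t sys_task_spawn_sketch(void *image, size_t size)
{
	unative_t rc = ENOMEM;

	void *kimage = malloc(size, 0);
	if (kimage == NULL)
		goto out;

	int crc = copy_from_uspace(kimage, image, size);
	if (crc != EOK) {
		rc = (unative_t) crc;
		goto out_kimage;    /* the original returned without freeing kimage */
	}
	smc_coherence_block(kimage, size);

	uspace_arg_t *kernel_uarg = malloc(sizeof(uspace_arg_t), 0);
	if (kernel_uarg == NULL)
		goto out_kimage;

	as_t *as = as_create(0);
	if (as == NULL)
		goto out_uarg;

	if (elf_load((elf_header_t *) kimage, as) != EE_OK) {
		rc = ENOENT;
		goto out_as;
	}

	/* ... create the stack area, the task and the initial thread here,
	 * jumping to the matching cleanup label on failure ... */

	return EOK;

out_as:
	as_destroy(as);
out_uarg:
	free(kernel_uarg);
out_kimage:
	free(kimage);
out:
	return rc;
}
```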
| Line (Rev 3424) | Rev 3424 | Line (Rev 3425) | Rev 3425 |
|---|---|---|---|
| 346 | `/** Find task structure corresponding to task ID.` | 247 | `/** Find task structure corresponding to task ID.` |
| 347 | ` *` | 248 | ` *` |
| 348 | ` * The tasks_lock must be already held by the caller of this function` | 249 | ` * The tasks_lock must be already held by the caller of this function and` |
| 349 | ` * and interrupts must be disabled.` | 250 | ` * interrupts must be disabled.` |
| 350 | ` *` | 251 | ` *` |
| 351 | ` * @param id Task ID.` | 252 | ` * @param id Task ID.` |
| 352 | ` *` | 253 | ` *` |
| 353 | ` * @return Task structure address or NULL if there is no such task ID.` | 254 | ` * @return Task structure address or NULL if there is no such task` |
| - | | 255 | ` * ID.` |
| 354 | ` */` | 256 | ` */` |
| 355 | `task_t *task_find_by_id(task_id_t id)` | 257 | `task_t *task_find_by_id(task_id_t id) { avltree_node_t *node;` |
| 356 | `{` | - | |
| 357 | `avltree_node_t *node;` | - | |
| 358 | | 258 | |
| 359 | `node = avltree_search(&tasks_tree, (avltree_key_t) id);` | 259 | `node = avltree_search(&tasks_tree, (avltree_key_t) id);` |
| 360 | | 260 | |
| 361 | `if (node)` | 261 | `if (node)` |
| 362 | `return avltree_get_instance(node, task_t, tasks_tree_node);` | 262 | `return avltree_get_instance(node, task_t, tasks_tree_node);` |
| 363 | `return NULL;` | 263 | `return NULL;` |
| 364 | `}` | 264 | `}` |
| 365 | | 265 | |
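Both revisions keep task_find_by_id() lock-free internally: the caller must already hold tasks_lock with interrupts disabled, exactly as the partially shown task_kill() does below. A minimal caller sketch under that assumption; interrupts_restore() is taken to be the counterpart of interrupts_disable(), and taking ta->lock before dropping tasks_lock is just one safe ordering:

```c
/* Sketch: look up a task by ID under the documented locking discipline. */
ipl_t ipl = interrupts_disable();
spinlock_lock(&tasks_lock);

task_t *ta = task_find_by_id(id);
if (ta == NULL) {
	spinlock_unlock(&tasks_lock);
	interrupts_restore(ipl);
	return ENOENT;                  /* same code task_kill() returns */
}

spinlock_lock(&ta->lock);               /* pin the task before dropping tasks_lock */
spinlock_unlock(&tasks_lock);

/* ... operate on ta ... */

spinlock_unlock(&ta->lock);
interrupts_restore(ipl);
```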
| Line (Rev 3424) | Rev 3424 | Line (Rev 3425) | Rev 3425 |
|---|---|---|---|
| 366 | `/** Get accounting data of given task.` | 266 | `/** Get accounting data of given task.` |
| 367 | ` *` | 267 | ` *` |
| 368 | ` * Note that task lock of 't' must be already held and` | 268 | ` * Note that task lock of 't' must be already held and interrupts must be` |
| 369 | ` * interrupts must be already disabled.` | 269 | ` * already disabled.` |
| 370 | ` *` | 270 | ` *` |
| 371 | ` * @param t Pointer to thread.` | 271 | ` * @param t Pointer to thread.` |
| 372 | ` *` | 272 | ` *` |
| - | | 273 | ` * @return Number of cycles used by the task and all its threads` |
| - | | 274 | ` * so far.` |
| 373 | ` */` | 275 | ` */` |
| 374 | `uint64_t task_get_accounting(task_t *t)` | 276 | `uint64_t task_get_accounting(task_t *t)` |
| 375 | `{` | 277 | `{` |
| 376 | `/* Accumulated value of task */` | 278 | `/* Accumulated value of task */` |
| 377 | `uint64_t ret = t->cycles;` | 279 | `uint64_t ret = t->cycles;` |
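Revision 3425 adds the missing @return documentation; note that despite the "@param t Pointer to thread." wording kept in both revisions, the argument is really the task whose cycle counts are summed. Given the stated preconditions (task lock held, interrupts disabled), a caller would look roughly like this sketch, with interrupts_restore() assumed as the counterpart of interrupts_disable():

```c
/* Sketch: read a task's accumulated cycle count under the required locks. */
ipl_t ipl = interrupts_disable();
spinlock_lock(&t->lock);                /* "task lock of 't' must be already held" */

uint64_t cycles = task_get_accounting(t);

spinlock_unlock(&t->lock);
interrupts_restore(ipl);

/* ... report or account 'cycles' ... */
```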
| Line (Rev 3424) | Rev 3424 | Line (Rev 3425) | Rev 3425 |
|---|---|---|---|
| Line 399... | | Line 301... | |
| 399 | `/** Kill task.` | 301 | `/** Kill task.` |
| 400 | ` *` | 302 | ` *` |
| 401 | ` * This function is idempotent.` | 303 | ` * This function is idempotent.` |
| 402 | ` * It signals all the task's threads to bail it out.` | 304 | ` * It signals all the task's threads to bail it out.` |
| 403 | ` *` | 305 | ` *` |
| 404 | ` * @param id ID of the task to be killed.` | 306 | ` * @param id ID of the task to be killed.` |
| 405 | ` *` | 307 | ` *` |
| 406 | ` * @return 0 on success or an error code from errno.h` | 308 | ` * @return Zero on success or an error code from errno.h.` |
| 407 | ` */` | 309 | ` */` |
| 408 | `int task_kill(task_id_t id)` | 310 | `int task_kill(task_id_t id)` |
| 409 | `{` | 311 | `{` |
| 410 | `ipl_t ipl;` | 312 | `ipl_t ipl;` |
| 411 | `task_t *ta;` | 313 | `task_t *ta;` |
| Line 422... | | Line 324... | |
| 422 | `return ENOENT;` | 324 | `return ENOENT;` |
| 423 | `}` | 325 | `}` |
| 424 | `spinlock_unlock(&tasks_lock);` | 326 | `spinlock_unlock(&tasks_lock);` |
| 425 | | 327 | |
| 426 | `/*` | 328 | `/*` |
| 427 | ` * Interrupt all threads except ktaskclnp.` | 329 | ` * Interrupt all threads.` |
| 428 | ` */` | 330 | ` */` |
| 429 | `spinlock_lock(&ta->lock);` | 331 | `spinlock_lock(&ta->lock);` |
| 430 | `for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {` | 332 | `for (cur = ta->th_head.next; cur != &ta->th_head; cur = cur->next) {` |
| 431 | `thread_t *thr;` | 333 | `thread_t *thr;` |
| 432 | `bool sleeping = false;` | 334 | `bool sleeping = false;` |