Rev 1766 → Rev 1780. In the hunks below, lines changed in Rev 1766 are prefixed with "-", their Rev 1780 replacements with "+", and unchanged context lines with a space.
Line 88 ...
  * as the threads_lock is held.
  */
 btree_t threads_btree;
 
 SPINLOCK_INITIALIZE(tidlock);
-__u32 last_tid = 0;
+uint32_t last_tid = 0;
 
 static slab_cache_t *thread_slab;
 #ifdef ARCH_HAS_FPU
 slab_cache_t *fpu_context_slab;
 #endif
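The entire revision is a mechanical rename of kernel-private integer typedefs to their C99-style counterparts. A minimal sketch of the presumed mapping (the unative_t line is an assumption based on how it is used below; it is a kernel typedef, not part of <stdint.h>):

#include <stdint.h>	/* uint32_t, uintptr_t */

/* Presumed mapping behind the renames in this revision:
 *   __u32     -> uint32_t    exact-width 32-bit unsigned integer (C99)
 *   __address -> uintptr_t   unsigned integer wide enough to hold any object pointer (C99)
 *   __native  -> unative_t   kernel-defined machine-word integer used for syscall values
 */
uint32_t  tid  = 0;			/* e.g. thread IDs, as in last_tid above  */
uintptr_t addr = (uintptr_t) &tid;	/* e.g. B+tree keys and stack addresses   */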
Line 252 ...
 	spinlock_unlock(&t->cpu->lock);
 
 	spinlock_unlock(&t->lock);
 
 	spinlock_lock(&threads_lock);
-	btree_remove(&threads_btree, (btree_key_t) ((__address ) t), NULL);
+	btree_remove(&threads_btree, (btree_key_t) ((uintptr_t ) t), NULL);
 	spinlock_unlock(&threads_lock);
 
 	/*
 	 * Detach from the containing task.
 	 */
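The casts that change here (and in btree_insert()/btree_search() further down) rely on the C99 guarantee that an object pointer converted to uintptr_t and back compares equal to the original, which is what makes a thread's own address usable as its B+tree key. A self-contained illustration of that guarantee (the helper name is hypothetical):

#include <stdint.h>

/* Round-trip property used by the (uintptr_t) casts in this diff:
 * pointer -> uintptr_t -> pointer yields the original pointer.    */
static int pointer_roundtrip_ok(void *p)
{
	uintptr_t key = (uintptr_t) p;	/* same idea as the btree_key_t casts  */
	return (void *) key == p;	/* guaranteed by C99 7.18.1.4          */
}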
Line 297 ...
 		return NULL;
 
 	thread_create_arch(t);
 
 	/* Not needed, but good for debugging */
-	memsetb((__address)t->kstack, THREAD_STACK_SIZE * 1<<STACK_FRAMES, 0);
+	memsetb((uintptr_t)t->kstack, THREAD_STACK_SIZE * 1<<STACK_FRAMES, 0);
 
 	ipl = interrupts_disable();
 	spinlock_lock(&tidlock);
 	t->tid = ++last_tid;
 	spinlock_unlock(&tidlock);
 	interrupts_restore(ipl);
 
 	context_save(&t->saved_context);
-	context_set(&t->saved_context, FADDR(cushion), (__address) t->kstack, THREAD_STACK_SIZE);
+	context_set(&t->saved_context, FADDR(cushion), (uintptr_t) t->kstack, THREAD_STACK_SIZE);
 
 	the_initialize((the_t *) t->kstack);
 
 	ipl = interrupts_disable();
 	t->saved_context.ipl = interrupts_read();
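The TID assignment in this hunk follows the pattern used throughout the file: disable interrupts, take the spinlock, do the minimal work, then release in reverse order. Isolated into a helper for readability (the helper name is mine; tidlock, last_tid and the primitives are the kernel's own, shown above):

static uint32_t tid_alloc(void)
{
	ipl_t ipl;
	uint32_t tid;

	ipl = interrupts_disable();	/* keep the critical section free of interrupts */
	spinlock_lock(&tidlock);
	tid = ++last_tid;		/* monotonically increasing 32-bit counter */
	spinlock_unlock(&tidlock);
	interrupts_restore(ipl);

	return tid;
}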
Line 366 ...
 
 	/*
 	 * Register this thread in the system-wide list.
 	 */
 	spinlock_lock(&threads_lock);
-	btree_insert(&threads_btree, (btree_key_t) ((__address) t), (void *) t, NULL);
+	btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, NULL);
 	spinlock_unlock(&threads_lock);
 
 	interrupts_restore(ipl);
 
 	return t;
Line 409 ...
  * Suspend execution of the current thread.
  *
  * @param sec Number of seconds to sleep.
  *
  */
-void thread_sleep(__u32 sec)
+void thread_sleep(uint32_t sec)
 {
 	thread_usleep(sec*1000000);
 }
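One arithmetic note on thread_sleep(): the conversion sec*1000000 is carried out in 32-bit arithmetic, so it wraps for sec > UINT32_MAX/1000000 = 4294 (roughly 71.5 minutes). The clamp below is purely illustrative; the kernel code above performs the raw multiply:

#include <stdint.h>

#define SLEEP_SEC_MAX	(UINT32_MAX / 1000000u)	/* == 4294 seconds */

static uint32_t sec_to_usec(uint32_t sec)
{
	if (sec > SLEEP_SEC_MAX)	/* illustrative guard against 32-bit wraparound */
		sec = SLEEP_SEC_MAX;
	return sec * 1000000u;
}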
 
 /** Wait for another thread to exit.
Line 422 ...
  * @param usec Timeout in microseconds.
  * @param flags Mode of operation.
  *
  * @return An error code from errno.h or an error code from synch.h.
  */
-int thread_join_timeout(thread_t *t, __u32 usec, int flags)
+int thread_join_timeout(thread_t *t, uint32_t usec, int flags)
 {
 	ipl_t ipl;
 	int rc;
 
 	if (t == THREAD)
Line 482 ...
  * Suspend execution of the current thread.
  *
  * @param usec Number of microseconds to sleep.
  *
  */
-void thread_usleep(__u32 usec)
+void thread_usleep(uint32_t usec)
 {
 	waitq_t wq;
 
 	waitq_initialize(&wq);
 
Line 562 ...
  */
 bool thread_exists(thread_t *t)
 {
 	btree_node_t *leaf;
 
-	return btree_search(&threads_btree, (btree_key_t) ((__address) t), &leaf) != NULL;
+	return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t), &leaf) != NULL;
 }
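Note that thread_exists() only consults the B+tree; as the comment in the hunk at line 88 states, the answer is valid only for as long as threads_lock is held. A sketch of the intended calling pattern (the wrapper name is hypothetical; the locking rule comes from that comment):

static bool check_and_use(thread_t *t)
{
	ipl_t ipl;
	bool found;

	ipl = interrupts_disable();
	spinlock_lock(&threads_lock);
	found = thread_exists(t);
	/* any dereference of t must happen here, before the lock is dropped */
	spinlock_unlock(&threads_lock);
	interrupts_restore(ipl);

	return found;
}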
 
 /** Process syscall to create new thread.
  *
  */
-__native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
+unative_t sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name)
 {
 	thread_t *t;
 	char namebuf[THREAD_NAME_BUFLEN];
 	uspace_arg_t *kernel_uarg;
-	__u32 tid;
+	uint32_t tid;
 	int rc;
 
 	rc = copy_from_uspace(namebuf, uspace_name, THREAD_NAME_BUFLEN);
 	if (rc != 0)
-		return (__native) rc;
+		return (unative_t) rc;
 
 	kernel_uarg = (uspace_arg_t *) malloc(sizeof(uspace_arg_t), 0);
 	rc = copy_from_uspace(kernel_uarg, uspace_uarg, sizeof(uspace_arg_t));
 	if (rc != 0) {
 		free(kernel_uarg);
-		return (__native) rc;
+		return (unative_t) rc;
 	}
 
 	if ((t = thread_create(uinit, kernel_uarg, TASK, 0, namebuf))) {
 		tid = t->tid;
 		thread_ready(t);
-		return (__native) tid;
+		return (unative_t) tid;
 	} else {
 		free(kernel_uarg);
 	}
 
-	return (__native) ENOMEM;
+	return (unative_t) ENOMEM;
 }
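For orientation, a hypothetical userspace-side wrapper for this handler. The syscall stub and number below are placeholders, not taken from this diff; only the argument order (uspace_uarg, then the thread name) and the unative_t return convention come from sys_thread_create() above:

/* Hypothetical wrapper; __SYSCALL2 and SYS_THREAD_CREATE stand in for
 * whatever stub macro and syscall number the userspace libc defines.  */
static uint32_t my_thread_create(uspace_arg_t *uarg, const char *name)
{
	return (uint32_t) __SYSCALL2(SYS_THREAD_CREATE,
	    (unative_t) uarg, (unative_t) name);
}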
 
 /** Process syscall to terminate thread.
  *
  */
-__native sys_thread_exit(int uspace_status)
+unative_t sys_thread_exit(int uspace_status)
 {
 	thread_exit();
 	/* Unreachable */
 	return 0;
 }