Rev 1138 | Rev 1171 | Go to most recent revision | Show entire file | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
| Rev 1138 | Rev 1158 | ||
|---|---|---|---|
| Line 39... | Line 39... | ||
| 39 | #include <synch/waitq.h> |
39 | #include <synch/waitq.h> |
| 40 | #include <synch/rwlock.h> |
40 | #include <synch/rwlock.h> |
| 41 | #include <cpu.h> |
41 | #include <cpu.h> |
| 42 | #include <func.h> |
42 | #include <func.h> |
| 43 | #include <context.h> |
43 | #include <context.h> |
| - | 44 | #include <adt/btree.h> |
|
| 44 | #include <adt/list.h> |
45 | #include <adt/list.h> |
| 45 | #include <typedefs.h> |
46 | #include <typedefs.h> |
| 46 | #include <time/clock.h> |
47 | #include <time/clock.h> |
| 47 | #include <adt/list.h> |
48 | #include <adt/list.h> |
| 48 | #include <config.h> |
49 | #include <config.h> |
| Line 56... | Line 57... | ||
| 56 | #include <debug.h> |
57 | #include <debug.h> |
| 57 | #include <main/uinit.h> |
58 | #include <main/uinit.h> |
| 58 | 59 | ||
| 59 | char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */ |
60 | char *thread_states[] = {"Invalid", "Running", "Sleeping", "Ready", "Entering", "Exiting"}; /**< Thread states */ |
| 60 | 61 | ||
| 61 | SPINLOCK_INITIALIZE(threads_lock); /**< Lock protecting threads_head list. For locking rules, see declaration thereof. */ |
62 | /** Lock protecting the threads_btree B+tree of all threads. For locking rules, see declaration thereof. */ |
| - | 63 | SPINLOCK_INITIALIZE(threads_lock); |
|
| 62 | LIST_INITIALIZE(threads_head); /**< List of all threads. */ |
64 | btree_t threads_btree; /**< B+tree of all threads. */ |
| 63 | 65 | ||
| 64 | SPINLOCK_INITIALIZE(tidlock); |
66 | SPINLOCK_INITIALIZE(tidlock); |
| 65 | __u32 last_tid = 0; |
67 | __u32 last_tid = 0; |
| 66 | 68 | ||
| 67 | static slab_cache_t *thread_slab; |
69 | static slab_cache_t *thread_slab; |
| 68 | #ifdef ARCH_HAS_FPU |
70 | #ifdef ARCH_HAS_FPU |
| 69 | slab_cache_t *fpu_context_slab; |
71 | slab_cache_t *fpu_context_slab; |
| 70 | #endif |
72 | #endif |
| 71 | 73 | ||
| 72 | - | ||
| 73 | /** Thread wrapper |
74 | /** Thread wrapper |
| 74 | * |
75 | * |
| 75 | * This wrapper is provided to ensure that every thread |
76 | * This wrapper is provided to ensure that every thread |
| 76 | * makes a call to thread_exit() when its implementing |
77 | * makes a call to thread_exit() when its implementing |
| 77 | * function returns. |
78 | * function returns. |
| Line 102... | Line 103... | ||
| 102 | 103 | ||
| 103 | spinlock_initialize(&t->lock, "thread_t_lock"); |
104 | spinlock_initialize(&t->lock, "thread_t_lock"); |
| 104 | link_initialize(&t->rq_link); |
105 | link_initialize(&t->rq_link); |
| 105 | link_initialize(&t->wq_link); |
106 | link_initialize(&t->wq_link); |
| 106 | link_initialize(&t->th_link); |
107 | link_initialize(&t->th_link); |
| 107 | link_initialize(&t->threads_link); |
- | |
| 108 | 108 | ||
| 109 | #ifdef ARCH_HAS_FPU |
109 | #ifdef ARCH_HAS_FPU |
| 110 | # ifdef CONFIG_FPU_LAZY |
110 | # ifdef CONFIG_FPU_LAZY |
| 111 | t->saved_fpu_context = NULL; |
111 | t->saved_fpu_context = NULL; |
| 112 | # else |
112 | # else |
| Line 158... | Line 158... | ||
| 158 | fpu_context_slab = slab_cache_create("fpu_slab", |
158 | fpu_context_slab = slab_cache_create("fpu_slab", |
| 159 | sizeof(fpu_context_t), |
159 | sizeof(fpu_context_t), |
| 160 | FPU_CONTEXT_ALIGN, |
160 | FPU_CONTEXT_ALIGN, |
| 161 | NULL, NULL, 0); |
161 | NULL, NULL, 0); |
| 162 | #endif |
162 | #endif |
| 163 | } |
- | |
| 164 | 163 | ||
| - | 164 | btree_create(&threads_btree); |
|
| - | 165 | } |
|
| 165 | 166 | ||
| 166 | /** Make thread ready |
167 | /** Make thread ready |
| 167 | * |
168 | * |
| 168 | * Switch thread t to the ready state. |
169 | * Switch thread t to the ready state. |
| 169 | * |
170 | * |
| Line 206... | Line 207... | ||
| 206 | atomic_inc(&cpu->nrdy); |
207 | atomic_inc(&cpu->nrdy); |
| 207 | 208 | ||
| 208 | interrupts_restore(ipl); |
209 | interrupts_restore(ipl); |
| 209 | } |
210 | } |
| 210 | 211 | ||
| 211 | - | ||
| 212 | /** Destroy thread memory structure |
212 | /** Destroy thread memory structure |
| 213 | * |
213 | * |
| 214 | * Detach thread from all queues, cpus etc. and destroy it. |
214 | * Detach thread from all queues, cpus etc. and destroy it. |
| 215 | * |
215 | * |
| 216 | * Assume thread->lock is held!! |
216 | * Assume thread->lock is held!! |
| Line 234... | Line 234... | ||
| 234 | spinlock_unlock(&t->task->lock); |
234 | spinlock_unlock(&t->task->lock); |
| 235 | 235 | ||
| 236 | spinlock_unlock(&t->lock); |
236 | spinlock_unlock(&t->lock); |
| 237 | 237 | ||
| 238 | spinlock_lock(&threads_lock); |
238 | spinlock_lock(&threads_lock); |
| 239 | list_remove(&t->threads_link); |
239 | btree_remove(&threads_btree, (__native) t, NULL); |
| 240 | spinlock_unlock(&threads_lock); |
240 | spinlock_unlock(&threads_lock); |
| 241 | 241 | ||
| 242 | slab_free(thread_slab, t); |
242 | slab_free(thread_slab, t); |
| 243 | } |
243 | } |
| 244 | 244 | ||
| 245 | - | ||
| 246 | /** Create new thread |
245 | /** Create new thread |
| 247 | * |
246 | * |
| 248 | * Create a new thread. |
247 | * Create a new thread. |
| 249 | * |
248 | * |
| 250 | * @param func Thread's implementing function. |
249 | * @param func Thread's implementing function. |
| Line 309... | Line 308... | ||
| 309 | /* |
308 | /* |
| 310 | * Register this thread in the system-wide list. |
309 | * Register this thread in the system-wide list. |
| 311 | */ |
310 | */ |
| 312 | ipl = interrupts_disable(); |
311 | ipl = interrupts_disable(); |
| 313 | spinlock_lock(&threads_lock); |
312 | spinlock_lock(&threads_lock); |
| 314 | list_append(&t->threads_link, &threads_head); |
313 | btree_insert(&threads_btree, (__native) t, (void *) t, NULL); |
| 315 | spinlock_unlock(&threads_lock); |
314 | spinlock_unlock(&threads_lock); |
| 316 | 315 | ||
| 317 | /* |
316 | /* |
| 318 | * Attach to the containing task. |
317 | * Attach to the containing task. |
| 319 | */ |
318 | */ |
| Line 324... | Line 323... | ||
| 324 | interrupts_restore(ipl); |
323 | interrupts_restore(ipl); |
| 325 | 324 | ||
| 326 | return t; |
325 | return t; |
| 327 | } |
326 | } |
| 328 | 327 | ||
| 329 | - | ||
| 330 | /** Make thread exiting |
328 | /** Make thread exiting |
| 331 | * |
329 | * |
| 332 | * End current thread execution and switch it to the exiting |
330 | * End current thread execution and switch it to the exiting |
| 333 | * state. All pending timeouts are executed. |
331 | * state. All pending timeouts are executed. |
| 334 | * |
332 | * |
| Line 361... | Line 359... | ||
| 361 | void thread_sleep(__u32 sec) |
359 | void thread_sleep(__u32 sec) |
| 362 | { |
360 | { |
| 363 | thread_usleep(sec*1000000); |
361 | thread_usleep(sec*1000000); |
| 364 | } |
362 | } |
| 365 | 363 | ||
| 366 | - | ||
| 367 | /** Thread usleep |
364 | /** Thread usleep |
| 368 | * |
365 | * |
| 369 | * Suspend execution of the current thread. |
366 | * Suspend execution of the current thread. |
| 370 | * |
367 | * |
| 371 | * @param usec Number of microseconds to sleep. |
368 | * @param usec Number of microseconds to sleep. |
| Line 378... | Line 375... | ||
| 378 | waitq_initialize(&wq); |
375 | waitq_initialize(&wq); |
| 379 | 376 | ||
| 380 | (void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING); |
377 | (void) waitq_sleep_timeout(&wq, usec, SYNCH_NON_BLOCKING); |
| 381 | } |
378 | } |
| 382 | 379 | ||
| 383 | - | ||
| 384 | /** Register thread out-of-context invocation |
380 | /** Register thread out-of-context invocation |
| 385 | * |
381 | * |
| 386 | * Register a function and its argument to be executed |
382 | * Register a function and its argument to be executed |
| 387 | * on next context switch to the current thread. |
383 | * on next context switch to the current thread. |
| 388 | * |
384 | * |
| Line 404... | Line 400... | ||
| 404 | 400 | ||
| 405 | /** Print list of threads debug info */ |
401 | /** Print list of threads debug info */ |
| 406 | void thread_print_list(void) |
402 | void thread_print_list(void) |
| 407 | { |
403 | { |
| 408 | link_t *cur; |
404 | link_t *cur; |
| 409 | thread_t *t; |
- | |
| 410 | ipl_t ipl; |
405 | ipl_t ipl; |
| 411 | 406 | ||
| 412 | /* Messing with thread structures, avoid deadlock */ |
407 | /* Messing with thread structures, avoid deadlock */ |
| 413 | ipl = interrupts_disable(); |
408 | ipl = interrupts_disable(); |
| 414 | spinlock_lock(&threads_lock); |
409 | spinlock_lock(&threads_lock); |
| 415 | 410 | ||
| 416 | for (cur=threads_head.next; cur!=&threads_head; cur=cur->next) { |
411 | for (cur = threads_btree.leaf_head.next; cur != &threads_btree.leaf_head; cur = cur->next) { |
| - | 412 | btree_node_t *node; |
|
| - | 413 | int i; |
|
| - | 414 | ||
| 417 | t = list_get_instance(cur, thread_t, threads_link); |
415 | node = list_get_instance(cur, btree_node_t, leaf_link); |
| - | 416 | for (i = 0; i < node->keys; i++) { |
|
| - | 417 | thread_t *t; |
|
| - | 418 | ||
| - | 419 | t = (thread_t *) node->value[i]; |
|
| 418 | printf("%s: address=%P, tid=%d, state=%s, task=%P, code=%P, stack=%P, cpu=", |
420 | printf("%s: address=%P, tid=%d, state=%s, task=%P, code=%P, stack=%P, cpu=", |
| 419 | t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack); |
421 | t->name, t, t->tid, thread_states[t->state], t->task, t->thread_code, t->kstack); |
| 420 | if (t->cpu) |
422 | if (t->cpu) |
| 421 | printf("cpu%d ", t->cpu->id); |
423 | printf("cpu%d ", t->cpu->id); |
| 422 | else |
424 | else |
| 423 | printf("none"); |
425 | printf("none"); |
| 424 | printf("\n"); |
426 | printf("\n"); |
| - | 427 | } |
|
| 425 | } |
428 | } |
| 426 | 429 | ||
| 427 | spinlock_unlock(&threads_lock); |
430 | spinlock_unlock(&threads_lock); |
| 428 | interrupts_restore(ipl); |
431 | interrupts_restore(ipl); |
| 429 | } |
432 | } |
| 430 | 433 | ||
| - | 434 | /** Check whether thread exists. |
|
| - | 435 | * |
|
| - | 436 | * Note that threads_lock must be already held and |
|
| - | 437 | * interrupts must be already disabled. |
|
| - | 438 | * |
|
| - | 439 | * @param t Pointer to thread. |
|
| - | 440 | * |
|
| - | 441 | * @return True if thread t is known to the system, false otherwise. |
|
| - | 442 | */ |
|
| - | 443 | bool thread_exists(thread_t *t) |
|
| - | 444 | { |
|
| - | 445 | btree_node_t *leaf; |
|
| - | 446 | ||
| - | 447 | return btree_search(&threads_btree, (__native) t, &leaf) != NULL; |
|
| - | 448 | } |
|
| - | 449 | ||
| 431 | /** Process syscall to create new thread. |
450 | /** Process syscall to create new thread. |
| 432 | * |
451 | * |
| 433 | */ |
452 | */ |
| 434 | __native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name) |
453 | __native sys_thread_create(uspace_arg_t *uspace_uarg, char *uspace_name) |
| 435 | { |
454 | { |