Rev 2487 | Rev 2504 | Go to most recent revision | Show entire file | Ignore whitespace | Details | Blame | Last modification | View Log | RSS feed
Rev 2487 | Rev 2502 | ||
---|---|---|---|
Line 49... | Line 49... | ||
49 | #include <synch/waitq.h> |
49 | #include <synch/waitq.h> |
50 | #include <synch/rwlock.h> |
50 | #include <synch/rwlock.h> |
51 | #include <cpu.h> |
51 | #include <cpu.h> |
52 | #include <func.h> |
52 | #include <func.h> |
53 | #include <context.h> |
53 | #include <context.h> |
54 | #include <adt/btree.h> |
54 | #include <adt/avl.h> |
55 | #include <adt/list.h> |
55 | #include <adt/list.h> |
56 | #include <time/clock.h> |
56 | #include <time/clock.h> |
57 | #include <time/timeout.h> |
57 | #include <time/timeout.h> |
58 | #include <config.h> |
58 | #include <config.h> |
59 | #include <arch/interrupt.h> |
59 | #include <arch/interrupt.h> |
Line 79... | Line 79... | ||
79 | "Entering", |
79 | "Entering", |
80 | "Exiting", |
80 | "Exiting", |
81 | "Lingering" |
81 | "Lingering" |
82 | }; |
82 | }; |
83 | 83 | ||
84 | /** Lock protecting the threads_btree B+tree. |
84 | /** Lock protecting the threads_tree AVL tree. |
85 | * |
85 | * |
86 | * For locking rules, see declaration thereof. |
86 | * For locking rules, see declaration thereof. |
87 | */ |
87 | */ |
88 | SPINLOCK_INITIALIZE(threads_lock); |
88 | SPINLOCK_INITIALIZE(threads_lock); |
89 | 89 | ||
90 | /** B+tree of all threads. |
90 | /** AVL tree of all threads. |
91 | * |
91 | * |
92 | * When a thread is found in the threads_btree B+tree, it is guaranteed to |
92 | * When a thread is found in the threads_tree AVL tree, it is guaranteed to |
93 | * exist as long as the threads_lock is held. |
93 | * exist as long as the threads_lock is held. |
94 | */ |
94 | */ |
95 | btree_t threads_btree; |
95 | avltree_t threads_tree; |
96 | 96 | ||
97 | SPINLOCK_INITIALIZE(tidlock); |
97 | SPINLOCK_INITIALIZE(tidlock); |
98 | thread_id_t last_tid = 0; |
98 | thread_id_t last_tid = 0; |
99 | 99 | ||
100 | static slab_cache_t *thread_slab; |
100 | static slab_cache_t *thread_slab; |
Line 210... | Line 210... | ||
210 | #ifdef ARCH_HAS_FPU |
210 | #ifdef ARCH_HAS_FPU |
211 | fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t), |
211 | fpu_context_slab = slab_cache_create("fpu_slab", sizeof(fpu_context_t), |
212 | FPU_CONTEXT_ALIGN, NULL, NULL, 0); |
212 | FPU_CONTEXT_ALIGN, NULL, NULL, 0); |
213 | #endif |
213 | #endif |
214 | 214 | ||
215 | btree_create(&threads_btree); |
215 | avltree_create(&threads_tree); |
216 | } |
216 | } |
217 | 217 | ||
218 | /** Make thread ready |
218 | /** Make thread ready |
219 | * |
219 | * |
220 | * Switch thread t to the ready state. |
220 | * Switch thread t to the ready state. |
Line 337... | Line 337... | ||
337 | t->task = task; |
337 | t->task = task; |
338 | 338 | ||
339 | t->fpu_context_exists = 0; |
339 | t->fpu_context_exists = 0; |
340 | t->fpu_context_engaged = 0; |
340 | t->fpu_context_engaged = 0; |
341 | 341 | ||
- | 342 | avltree_node_initialize(&t->threads_tree_node); |
|
- | 343 | t->threads_tree_node.key = (uintptr_t) t; |
|
- | 344 | ||
342 | /* might depend on previous initialization */ |
345 | /* might depend on previous initialization */ |
343 | thread_create_arch(t); |
346 | thread_create_arch(t); |
344 | 347 | ||
345 | if (!(flags & THREAD_FLAG_NOATTACH)) |
348 | if (!(flags & THREAD_FLAG_NOATTACH)) |
346 | thread_attach(t, task); |
349 | thread_attach(t, task); |
Line 366... | Line 369... | ||
366 | spinlock_unlock(&t->cpu->lock); |
369 | spinlock_unlock(&t->cpu->lock); |
367 | 370 | ||
368 | spinlock_unlock(&t->lock); |
371 | spinlock_unlock(&t->lock); |
369 | 372 | ||
370 | spinlock_lock(&threads_lock); |
373 | spinlock_lock(&threads_lock); |
371 | btree_remove(&threads_btree, (btree_key_t) ((uintptr_t ) t), NULL); |
374 | avltree_delete(&threads_tree, &t->threads_tree_node); |
372 | spinlock_unlock(&threads_lock); |
375 | spinlock_unlock(&threads_lock); |
373 | 376 | ||
374 | /* |
377 | /* |
375 | * Detach from the containing task. |
378 | * Detach from the containing task. |
376 | */ |
379 | */ |
Line 389... | Line 392... | ||
389 | } |
392 | } |
390 | 393 | ||
391 | /** Make the thread visible to the system. |
394 | /** Make the thread visible to the system. |
392 | * |
395 | * |
393 | * Attach the thread structure to the current task and make it visible in the |
396 | * Attach the thread structure to the current task and make it visible in the |
394 | * threads_btree. |
397 | * threads_tree. |
395 | * |
398 | * |
396 | * @param t Thread to be attached to the task. |
399 | * @param t Thread to be attached to the task. |
397 | * @param task Task to which the thread is to be attached. |
400 | * @param task Task to which the thread is to be attached. |
398 | */ |
401 | */ |
399 | void thread_attach(thread_t *t, task_t *task) |
402 | void thread_attach(thread_t *t, task_t *task) |
Line 412... | Line 415... | ||
412 | 415 | ||
413 | /* |
416 | /* |
414 | * Register this thread in the system-wide list. |
417 | * Register this thread in the system-wide list. |
415 | */ |
418 | */ |
416 | spinlock_lock(&threads_lock); |
419 | spinlock_lock(&threads_lock); |
417 | btree_insert(&threads_btree, (btree_key_t) ((uintptr_t) t), (void *) t, |
420 | avltree_insert(&threads_tree, &t->threads_tree_node); |
418 | NULL); |
- | |
419 | spinlock_unlock(&threads_lock); |
421 | spinlock_unlock(&threads_lock); |
420 | 422 | ||
421 | interrupts_restore(ipl); |
423 | interrupts_restore(ipl); |
422 | } |
424 | } |
423 | 425 | ||
Line 573... | Line 575... | ||
573 | THREAD->call_me_with = call_me_with; |
575 | THREAD->call_me_with = call_me_with; |
574 | spinlock_unlock(&THREAD->lock); |
576 | spinlock_unlock(&THREAD->lock); |
575 | interrupts_restore(ipl); |
577 | interrupts_restore(ipl); |
576 | } |
578 | } |
577 | 579 | ||
- | 580 | static void thread_walker(avltree_node_t *node) |
|
- | 581 | { |
|
- | 582 | thread_t *t; |
|
- | 583 | ||
- | 584 | t = avltree_get_instance(node, thread_t, threads_tree_node); |
|
- | 585 | ||
- | 586 | uint64_t cycles; |
|
- | 587 | char suffix; |
|
- | 588 | order(t->cycles, &cycles, &suffix); |
|
- | 589 | ||
- | 590 | printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx %#10zx %9llu%c ", |
|
- | 591 | t->tid, t->name, t, thread_states[t->state], t->task, |
|
- | 592 | t->task->context, t->thread_code, t->kstack, cycles, suffix); |
|
- | 593 | ||
- | 594 | if (t->cpu) |
|
- | 595 | printf("%-4zd", t->cpu->id); |
|
- | 596 | else |
|
- | 597 | printf("none"); |
|
- | 598 | ||
- | 599 | if (t->state == Sleeping) |
|
- | 600 | printf(" %#10zx", t->sleep_queue); |
|
- | 601 | ||
- | 602 | printf("\n"); |
|
- | 603 | } |
|
- | 604 | ||
578 | /** Print list of threads debug info */ |
605 | /** Print list of threads debug info */ |
579 | void thread_print_list(void) |
606 | void thread_print_list(void) |
580 | { |
607 | { |
581 | link_t *cur; |
- | |
582 | ipl_t ipl; |
608 | ipl_t ipl; |
583 | 609 | ||
584 | /* Messing with thread structures, avoid deadlock */ |
610 | /* Messing with thread structures, avoid deadlock */ |
585 | ipl = interrupts_disable(); |
611 | ipl = interrupts_disable(); |
586 | spinlock_lock(&threads_lock); |
612 | spinlock_lock(&threads_lock); |
Line 588... | Line 614... | ||
588 | printf("tid name address state task ctx code " |
614 | printf("tid name address state task ctx code " |
589 | " stack cycles cpu waitqueue\n"); |
615 | " stack cycles cpu waitqueue\n"); |
590 | printf("------ ---------- ---------- -------- ---------- --- --------" |
616 | printf("------ ---------- ---------- -------- ---------- --- --------" |
591 | "-- ---------- ---------- ---- ---------\n"); |
617 | "-- ---------- ---------- ---- ---------\n"); |
592 | 618 | ||
593 | for (cur = threads_btree.leaf_head.next; |
619 | avltree_walk(&threads_tree, thread_walker); |
594 | cur != &threads_btree.leaf_head; cur = cur->next) { |
- | |
595 | btree_node_t *node; |
- | |
596 | unsigned int i; |
- | |
597 | - | ||
598 | node = list_get_instance(cur, btree_node_t, leaf_link); |
- | |
599 | for (i = 0; i < node->keys; i++) { |
- | |
600 | thread_t *t; |
- | |
601 | - | ||
602 | t = (thread_t *) node->value[i]; |
- | |
603 | - | ||
604 | uint64_t cycles; |
- | |
605 | char suffix; |
- | |
606 | order(t->cycles, &cycles, &suffix); |
- | |
607 | - | ||
608 | printf("%-6llu %-10s %#10zx %-8s %#10zx %-3ld %#10zx " |
- | |
609 | "%#10zx %9llu%c ", t->tid, t->name, t, |
- | |
610 | thread_states[t->state], t->task, t->task->context, |
- | |
611 | t->thread_code, t->kstack, cycles, suffix); |
- | |
612 | - | ||
613 | if (t->cpu) |
- | |
614 | printf("%-4zd", t->cpu->id); |
- | |
615 | else |
- | |
616 | printf("none"); |
- | |
617 | - | ||
618 | if (t->state == Sleeping) |
- | |
619 | printf(" %#10zx", t->sleep_queue); |
- | |
620 | - | ||
621 | printf("\n"); |
- | |
622 | } |
- | |
623 | } |
- | |
624 | 620 | ||
625 | spinlock_unlock(&threads_lock); |
621 | spinlock_unlock(&threads_lock); |
626 | interrupts_restore(ipl); |
622 | interrupts_restore(ipl); |
627 | } |
623 | } |
628 | 624 | ||
Line 635... | Line 631... | ||
635 | * |
631 | * |
636 | * @return True if thread t is known to the system, false otherwise. |
632 | * @return True if thread t is known to the system, false otherwise. |
637 | */ |
633 | */ |
638 | bool thread_exists(thread_t *t) |
634 | bool thread_exists(thread_t *t) |
639 | { |
635 | { |
640 | btree_node_t *leaf; |
636 | avltree_node_t *node; |
- | 637 | ||
- | 638 | node = avltree_search(&threads_tree, (avltree_key_t) ((uintptr_t) t)); |
|
641 | 639 | ||
642 | return btree_search(&threads_btree, (btree_key_t) ((uintptr_t) t), |
- | |
643 | &leaf) != NULL; |
640 | return node != NULL; |
644 | } |
641 | } |
645 | 642 | ||
646 | 643 | ||
647 | /** Update accounting of current thread. |
644 | /** Update accounting of current thread. |
648 | * |
645 | * |