Diff of slab_print_list() between Rev 3180 and Rev 3183
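The change below rewrites slab_print_list() so that slab_cache_lock is no longer held while the statistics are printed. As the new in-code comment explains, the print path may itself need to allocate memory, so printing under the lock could deadlock. Instead, every pass re-walks the cache list under the lock, skips over the caches that were already reported, copies the next cache's counters into locals, drops the lock, and only then prints.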
@@ -792,34 +792,81 @@
 
 
 /* Print list of slabs */
 void slab_print_list(void)
 {
-	slab_cache_t *cache;
-	link_t *cur;
-	ipl_t ipl;
+	int skip = 0;
 
-	ipl = interrupts_disable();
-	spinlock_lock(&slab_cache_lock);
 	printf("slab name        size     pages  obj/pg slabs  cached allocated"
 	    " ctl\n");
 	printf("---------------- -------- ------ ------ ------ ------ ---------"
 	    " ---\n");
 
-	for (cur = slab_cache_list.next; cur != &slab_cache_list;
-	    cur = cur->next) {
+	while (true) {
+		slab_cache_t *cache;
+		link_t *cur;
+		ipl_t ipl;
+		int i;
+
+		/*
+		 * We must not hold the slab_cache_lock spinlock when printing
+		 * the statistics. Otherwise we can easily deadlock if the print
+		 * needs to allocate memory.
+		 *
+		 * Therefore, we walk through the slab cache list, skipping some
+		 * amount of already processed caches during each iteration and
+		 * gathering statistics about the first unprocessed cache. For
+		 * the sake of printing the statistics, we release the
+		 * slab_cache_lock and reacquire it afterwards. Then the walk
+		 * starts again.
+		 *
+		 * This limits both the efficiency and also the accuracy of the
+		 * obtained statistics. The efficiency is decreased because the
+		 * time complexity of the algorithm is quadratic instead of
+		 * linear. The accuracy is impacted because we drop the lock
+		 * after processing one cache. If there is someone else
+		 * manipulating the cache list, we might omit an arbitrary
+		 * number of caches or process one cache multiple times. This
+		 * is acceptable, however, since the output is only statistics.
+		 */
+
+		ipl = interrupts_disable();
+		spinlock_lock(&slab_cache_lock);
+
+		for (i = 0, cur = slab_cache_list.next;
+		    i < skip && cur != &slab_cache_list;
+		    i++, cur = cur->next)
+			;
+
+		if (cur == &slab_cache_list) {
+			spinlock_unlock(&slab_cache_lock);
+			interrupts_restore(ipl);
+			break;
+		}
+
+		skip++;
+
 		cache = list_get_instance(cur, slab_cache_t, link);
+
+		char *name = cache->name;
+		uint8_t order = cache->order;
+		size_t size = cache->size;
+		unsigned int objects = cache->objects;
+		long allocated_slabs = atomic_get(&cache->allocated_slabs);
+		long cached_objs = atomic_get(&cache->cached_objs);
+		long allocated_objs = atomic_get(&cache->allocated_objs);
+		int flags = cache->flags;
+
+		spinlock_unlock(&slab_cache_lock);
+		interrupts_restore(ipl);
 
 		printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
-		    cache->name, cache->size, (1 << cache->order),
-		    cache->objects, atomic_get(&cache->allocated_slabs),
-		    atomic_get(&cache->cached_objs),
-		    atomic_get(&cache->allocated_objs),
-		    cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
+		    name, size, (1 << order), objects, allocated_slabs,
+		    cached_objs, allocated_objs,
+		    flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
 	}
-	spinlock_unlock(&slab_cache_lock);
-	interrupts_restore(ipl);
 }
 
 void slab_cache_init(void)
 {
 	int i, size;