Rev 789 → Rev 791, shown as a unified diff (lines prefixed with - exist only in Rev 789, lines prefixed with + only in Rev 791; the original line numbers are kept in the @@ hunk headers).

@@ -110,23 +110,21 @@
 
 /** Magazine cache */
 static slab_cache_t mag_cache;
 /** Cache for cache descriptors */
 static slab_cache_t slab_cache_cache;
-/** Cache for magcache structure from cache_t */
-static slab_cache_t *cpu_cache = NULL;
 /** Cache for external slab descriptors
  * This time we want per-cpu cache, so do not make it static
  * - using SLAB for internal SLAB structures will not deadlock,
  *   as all slab structures are 'small' - control structures of
  *   their caches do not require further allocation
  */
 static slab_cache_t *slab_extern_cache;
 /** Caches for malloc */
 static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
 char *malloc_names[] = {
-    "malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
+    "malloc-16","malloc-32","malloc-64","malloc-128",
     "malloc-256","malloc-512","malloc-1K","malloc-2K",
     "malloc-4K","malloc-8K","malloc-16K","malloc-32K",
     "malloc-64K","malloc-128K"
 };
 
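The `malloc_caches` table above holds one slab cache per power-of-two size class, and after this change the name list runs from 16 B to 128 K, i.e. 14 classes, which matches `SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1` if the widths are 4 and 17. The sketch below only illustrates how a request size could be rounded up to such a class and turned into an array index; the constants and the linear search are assumptions for the illustration, not the allocator's actual lookup code.

```c
#include <stddef.h>
#include <stdio.h>

/* Assumed widths, consistent with the 14 names above: 2^4 = 16 B ... 2^17 = 128 K. */
#define SLAB_MIN_MALLOC_W 4
#define SLAB_MAX_MALLOC_W 17

/* Round a request up to the next power-of-two class and return its index
 * into a malloc_caches[]-style array (linear search for clarity only). */
int malloc_cache_index(size_t size)
{
    int w = SLAB_MIN_MALLOC_W;

    while (((size_t) 1 << w) < size && w < SLAB_MAX_MALLOC_W)
        w++;
    return w - SLAB_MIN_MALLOC_W;
}

int main(void)
{
    printf("24 B   -> class %d (malloc-32)\n", malloc_cache_index(24));
    printf("4096 B -> class %d (malloc-4K)\n", malloc_cache_index(4096));
    return 0;
}
```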
@@ -137,10 +135,14 @@
     void *start;       /**< Start address of first available item */
     count_t available; /**< Count of available items in this slab */
     index_t nextavail; /**< The index of next available item */
 }slab_t;
 
+#ifdef CONFIG_DEBUG
+static int _slab_initialized = 0;
+#endif
+
 /**************************************/
 /* SLAB allocation functions */
 
 /**
  * Allocate frames for slab space and initialize
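The `slab_t` fields kept by this hunk (`start`, `available`, `nextavail`) track free objects by index. A common way to implement that, and a plausible reading of `nextavail`, is to store the index of the next free object inside each unused object itself, so the free list costs no extra memory. The toy model below is a sketch under that assumption; `toy_slab_t` and the helper names are invented.

```c
#include <stddef.h>
#include <stdint.h>

/* Toy model: a slab of `count` equally sized objects where every free object
 * stores the index of the next free one in its first sizeof(size_t) bytes
 * (so objsize must be at least that large). */
typedef struct {
    void *start;       /* first object inside the slab's frame(s) */
    size_t available;  /* number of objects still free */
    size_t nextavail;  /* index of the first free object */
} toy_slab_t;

void toy_slab_init(toy_slab_t *slab, void *start, size_t objsize, size_t count)
{
    slab->start = start;
    slab->available = count;
    slab->nextavail = 0;
    for (size_t i = 0; i < count; i++)   /* chain the free objects by index */
        *(size_t *) ((uint8_t *) start + i * objsize) = i + 1;
}

void *toy_obj_create(toy_slab_t *slab, size_t objsize)
{
    if (slab->available == 0)
        return NULL;

    uint8_t *obj = (uint8_t *) slab->start + slab->nextavail * objsize;
    slab->nextavail = *(size_t *) obj;   /* unlink: follow the stored index */
    slab->available--;
    return obj;
}
```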
@@ -541,13 +543,14 @@
  * Initialize mag_cache structure in slab cache
  */
 static void make_magcache(slab_cache_t *cache)
 {
     int i;
+
+    ASSERT(_slab_initialized >= 2);
 
-    ASSERT(cpu_cache);
-    cache->mag_cache = slab_alloc(cpu_cache, 0);
+    cache->mag_cache = kalloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
     for (i=0; i < config.cpu_count; i++) {
         memsetb((__address)&cache->mag_cache[i],
                 sizeof(cache->mag_cache[i]), 0);
         spinlock_initialize(&cache->mag_cache[i].lock,
                             "slab_maglock_cpu");
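Net effect of this hunk: the per-CPU magazine array is no longer carved out of the dedicated `cpu_cache` slab but obtained with a plain `kalloc` of `config.cpu_count` entries, and the new `ASSERT(_slab_initialized >= 2)` records that this may only happen once the allocator is fully up. The sketch below shows why the array is sized by the CPU count: each CPU only ever touches its own slot, so slots of different CPUs never contend. The structure layout and the `current_cpu_id` parameter are illustrative assumptions, not the kernel's definitions.

```c
#include <stddef.h>

/* Toy per-CPU magazine slot; the real slab_mag_cache_t differs in detail. */
typedef struct {
    int lock;        /* stands in for the per-slot spinlock */
    void *current;   /* magazine this CPU currently allocates from */
    void *last;      /* previously used magazine kept as a spare */
} toy_mag_cache_t;

typedef struct {
    toy_mag_cache_t *mag_cache;   /* array with one slot per CPU */
} toy_cache_t;

/* The fast path indexes the array with the id of the executing CPU, so the
 * per-slot lock is only ever taken by that CPU (and by reclaim code). */
toy_mag_cache_t *toy_cpu_slot(toy_cache_t *cache, unsigned current_cpu_id)
{
    return &cache->mag_cache[current_cpu_id];
}
```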
@@ -706,11 +709,11 @@
     if (!list_empty(&cache->full_slabs) \
         || !list_empty(&cache->partial_slabs))
         panic("Destroying cache that is not empty.");
 
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
-        slab_free(cpu_cache, cache->mag_cache);
+        kfree(cache->mag_cache);
     slab_free(&slab_cache_cache, cache);
 }
 
 /** Allocate new object from cache - if no flags given, always returns
     memory */
@@ -803,14 +806,10 @@
     }
     spinlock_unlock(&slab_cache_lock);
     interrupts_restore(ipl);
 }
 
-#ifdef CONFIG_DEBUG
-static int _slab_initialized = 0;
-#endif
-
 void slab_cache_init(void)
 {
     int i, size;
 
     /* Initialize magazine cache */
@@ -856,14 +855,14 @@
 void slab_enable_cpucache(void)
 {
     link_t *cur;
     slab_cache_t *s;
 
-    cpu_cache = slab_cache_create("magcpucache",
-                                  sizeof(slab_mag_cache_t) * config.cpu_count,
-                                  0, NULL, NULL,
-                                  SLAB_CACHE_NOMAGAZINE);
+#ifdef CONFIG_DEBUG
+    _slab_initialized = 2;
+#endif
+
     spinlock_lock(&slab_cache_lock);
 
     for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
         s = list_get_instance(cur, slab_cache_t, link);
         if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
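After this hunk, enabling per-CPU caching no longer needs the `magcpucache` slab at all: setting `_slab_initialized = 2` is the signal that `make_magcache` (guarded by the assert in the earlier hunk) may allocate per-CPU arrays, and the loop at the end of the hunk inspects each cache's `SLAB_CACHE_MAGDEFERRED` flag, presumably to build the deferred per-CPU state at this point. Below is a sketch of that deferred-enable pattern with invented `toy_` names; it is not the kernel's code.

```c
#include <stddef.h>

enum { TOY_MAGDEFERRED = 1 << 0 };   /* stands in for SLAB_CACHE_MAGDEFERRED */

typedef struct toy_deferred_cache {
    unsigned flags;
    struct toy_deferred_cache *next;
} toy_deferred_cache_t;

/* Walk the cache list and finish per-CPU setup for every cache that had to
 * defer it because it was created before the allocator was fully ready. */
void toy_enable_cpucaches(toy_deferred_cache_t *list, int slab_ready)
{
    if (!slab_ready)                 /* mirrors the _slab_initialized >= 2 gate */
        return;

    for (toy_deferred_cache_t *c = list; c != NULL; c = c->next) {
        if (!(c->flags & TOY_MAGDEFERRED))
            continue;
        /* ... build the per-CPU magazine array here (make_magcache role) ... */
        c->flags &= ~TOY_MAGDEFERRED;
    }
}
```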