Diff: Rev 788 → Rev 789
@@ -110,11 +110,12 @@

 /** Magazine cache */
 static slab_cache_t mag_cache;
 /** Cache for cache descriptors */
 static slab_cache_t slab_cache_cache;
-
+/** Cache for magcache structure from cache_t */
+static slab_cache_t *cpu_cache = NULL;
 /** Cache for external slab descriptors
  * This time we want per-cpu cache, so do not make it static
  * - using SLAB for internal SLAB structures will not deadlock,
  *   as all slab structures are 'small' - control structures of
  *   their caches do not require further allocation
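
The new cpu_cache backs allocations of the per-CPU magazine array that was previously embedded directly in slab_cache_t. A minimal sketch of the slot type it stores, with field names assumed from how the code below uses it (the real slab_mag_cache_t definition lives in the slab header, not in this diff):

typedef struct {
        SPINLOCK_DECLARE(lock);     /* initialized as "slab_maglock_cpu" in make_magcache() */
        slab_magazine_t *current;   /* assumed: magazine currently being filled/drained */
        slab_magazine_t *last;      /* assumed: previously active magazine */
} slab_mag_cache_t;

With this change, cache->mag_cache becomes a pointer to config.cpu_count such slots obtained from cpu_cache, rather than an array sized into the cache descriptor itself.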
@@ -232,16 +233,16 @@

        if (!slab)
                slab = obj2slab(obj);

        ASSERT(slab->cache == cache);
-       ASSERT(slab->available < cache->objects);

        if (cache->destructor)
                freed = cache->destructor(obj);

        spinlock_lock(&cache->slablock);
+       ASSERT(slab->available < cache->objects);

        *((int *)obj) = slab->nextavail;
        slab->nextavail = (obj - slab->start)/cache->size;
        slab->available++;

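
A note on the relocated assertion (reasoning inferred from the diff, not stated in it): slab->available is updated under cache->slablock, so sampling it before the lock is taken could race with a concurrent free on the same slab. The ordering this hunk establishes, sketched with the matching unlock for context:

spinlock_lock(&cache->slablock);
ASSERT(slab->available < cache->objects); /* stable: no concurrent update possible */
*((int *)obj) = slab->nextavail;          /* thread obj onto the slab free list */
slab->nextavail = (obj - slab->start)/cache->size;
slab->available++;
/* ... slab moved between partial/free lists ... */
spinlock_unlock(&cache->slablock);

The destructor call remains outside the lock, which is also why the assertion moves below it rather than staying in its old position.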
@@ -534,21 +535,37 @@
        if (cache->flags & SLAB_CACHE_SLINSIDE)
                ssize -= sizeof(slab_t);
        return ssize - objects*cache->size;
 }

+/**
+ * Initialize mag_cache structure in slab cache
+ */
+static void make_magcache(slab_cache_t *cache)
+{
+       int i;
+
+       ASSERT(cpu_cache);
+       cache->mag_cache = slab_alloc(cpu_cache, 0);
+       for (i=0; i < config.cpu_count; i++) {
+               memsetb((__address)&cache->mag_cache[i],
+                       sizeof(cache->mag_cache[i]), 0);
+               spinlock_initialize(&cache->mag_cache[i].lock,
+                                   "slab_maglock_cpu");
+       }
+}
+
 /** Initialize allocated memory as a slab cache */
 static void
 _slab_cache_create(slab_cache_t *cache,
                    char *name,
                    size_t size,
                    size_t align,
                    int (*constructor)(void *obj, int kmflag),
                    int (*destructor)(void *obj),
                    int flags)
 {
-       int i;
        int pages;
        ipl_t ipl;

        memsetb((__address)cache, sizeof(*cache), 0);
        cache->name = name;
@@ -566,18 +583,12 @@
        list_initialize(&cache->full_slabs);
        list_initialize(&cache->partial_slabs);
        list_initialize(&cache->magazines);
        spinlock_initialize(&cache->slablock, "slab_lock");
        spinlock_initialize(&cache->maglock, "slab_maglock");
-       if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
-               for (i=0; i < config.cpu_count; i++) {
-                       memsetb((__address)&cache->mag_cache[i],
-                               sizeof(cache->mag_cache[i]), 0);
-                       spinlock_initialize(&cache->mag_cache[i].lock,
-                                           "slab_maglock_cpu");
-               }
-       }
+       if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
+               make_magcache(cache);

        /* Compute slab sizes, object counts in slabs etc. */
        if (cache->size < SLAB_INSIDE_SIZE)
                cache->flags |= SLAB_CACHE_SLINSIDE;

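
Factoring the loop out into make_magcache() lets _slab_cache_create() defer per-CPU setup for caches created before the number of processors is known. The full-mask comparison used later in slab_enable_cpucache(), (s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED, suggests SLAB_CACHE_MAGDEFERRED is a compound flag that includes SLAB_CACHE_NOMAGAZINE, so a deferred cache simply runs magazine-less until enabled. A hypothetical early-boot usage under that assumption (name and size below are placeholders for illustration):

/* Hypothetical cache created before slab_enable_cpucache() runs. */
slab_cache_t *early_cache = slab_cache_create("early_cache", 64, 0,
                                              NULL, NULL,
                                              SLAB_CACHE_MAGDEFERRED);
/* Allocations bypass magazines until slab_enable_cpucache() calls
 * make_magcache() on this cache and clears the deferred flag. */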
@@ -694,10 +705,12 @@
        /* All slabs must be empty */
        if (!list_empty(&cache->full_slabs) \
            || !list_empty(&cache->partial_slabs))
                panic("Destroying cache that is not empty.");

+       if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
+               slab_free(cpu_cache, cache->mag_cache);
        slab_free(&slab_cache_cache, cache);
 }

 /** Allocate new object from cache - if no flags given, always returns
  memory */
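
Destruction now mirrors make_magcache(): the per-CPU array is an ordinary cpu_cache object and must be returned to it before the descriptor itself is freed. The symmetric pair, compressed into a sketch:

cache->mag_cache = slab_alloc(cpu_cache, 0);   /* make_magcache()      */
slab_free(cpu_cache, cache->mag_cache);        /* slab_cache_destroy() */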
@@ -808,33 +821,62 @@
                           NULL, NULL,
                           SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
        /* Initialize slab_cache cache */
        _slab_cache_create(&slab_cache_cache,
                           "slab_cache",
-                          sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
+                          sizeof(slab_cache_cache),
                           sizeof(__address),
                           NULL, NULL,
                           SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
        /* Initialize external slab cache */
        slab_extern_cache = slab_cache_create("slab_extern",
                                              sizeof(slab_t),
                                              0, NULL, NULL,
-                                             SLAB_CACHE_SLINSIDE);
+                                             SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

        /* Initialize structures for malloc */
        for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
             i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
             i++, size <<= 1) {
                malloc_caches[i] = slab_cache_create(malloc_names[i],
                                                     size, 0,
-                                                    NULL,NULL,0);
+                                                    NULL,NULL, SLAB_CACHE_MAGDEFERRED);
        }
 #ifdef CONFIG_DEBUG
        _slab_initialized = 1;
 #endif
 }

+/** Enable cpu_cache
+ *
+ * Kernel calls this function, when it knows the real number of
+ * processors.
+ * Allocate slab for cpucache and enable it on all existing
+ * slabs that are SLAB_CACHE_MAGDEFERRED
+ */
+void slab_enable_cpucache(void)
+{
+       link_t *cur;
+       slab_cache_t *s;
+
+       cpu_cache = slab_cache_create("magcpucache",
+                                     sizeof(slab_mag_cache_t) * config.cpu_count,
+                                     0, NULL, NULL,
+                                     SLAB_CACHE_NOMAGAZINE);
+       spinlock_lock(&slab_cache_lock);
+
+       for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
+               s = list_get_instance(cur, slab_cache_t, link);
+               if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
+                       continue;
+               make_magcache(s);
+               s->flags &= ~SLAB_CACHE_MAGDEFERRED;
+       }
+
+       spinlock_unlock(&slab_cache_lock);
+}
+
 /**************************************/
 /* kalloc/kfree functions */
 void * kalloc(unsigned int size, int flags)
 {
        int idx;
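
slab_enable_cpucache() is intended to run exactly once, after config.cpu_count is final. A hypothetical call site, purely for illustration (the function name and placement are assumptions, not part of this diff):

/* Hypothetical late-init hook, after all processors are detected: */
void kinit_after_smp(void)
{
        /* Every cache still flagged SLAB_CACHE_MAGDEFERRED gains its
         * per-CPU magazines here; cpu_cache itself is created first,
         * with SLAB_CACHE_NOMAGAZINE to avoid a chicken-and-egg cycle. */
        slab_enable_cpucache();
}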