--- Rev 769
+++ Rev 771
@@ -90,10 +90,11 @@
 #include <config.h>
 #include <print.h>
 #include <arch.h>
 #include <panic.h>
 #include <debug.h>
+#include <bitops.h>
 
 SPINLOCK_INITIALIZE(slab_cache_lock);
 static LIST_INITIALIZE(slab_cache_list);
 
 /** Magazine cache */
@@ -106,10 +107,18 @@
  * - using SLAB for internal SLAB structures will not deadlock,
  *   as all slab structures are 'small' - control structures of
  *   their caches do not require further allocation
  */
 static slab_cache_t *slab_extern_cache;
+/** Caches for malloc */
+static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
+char *malloc_names[] = {
+	"malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
+	"malloc-256","malloc-512","malloc-1K","malloc-2K",
+	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
+	"malloc-64K","malloc-128K"
+};
 
 /** Slab descriptor */
 typedef struct {
 	slab_cache_t *cache; /**< Pointer to parent cache */
 	link_t link;	/* List of full/partial slabs */
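
The name table implies the usual power-of-two size classes: entry i serves
objects of 1 << (SLAB_MIN_MALLOC_W + i) bytes, and the fifteen names from
malloc-8 to malloc-128K suggest SLAB_MIN_MALLOC_W == 3 and
SLAB_MAX_MALLOC_W == 17. Both constants are defined outside this diff, so
treat those values as inferred. A minimal consistency check, as a sketch:

	/* Sketch only: the table must name every malloc cache. */
	ASSERT(sizeof(malloc_names) / sizeof(malloc_names[0]) ==
	       SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
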
@@ -476,10 +485,11 @@
 			 int (*constructor)(void *obj, int kmflag),
 			 void (*destructor)(void *obj),
 			 int flags)
 {
 	int i;
+	int pages;
 
 	memsetb((__address)cache, sizeof(*cache), 0);
 	cache->name = name;
 
 	if (align < sizeof(__native))
@@ -505,11 +515,12 @@
 	/* Compute slab sizes, object counts in slabs etc. */
 	if (cache->size < SLAB_INSIDE_SIZE)
 		cache->flags |= SLAB_CACHE_SLINSIDE;
 
 	/* Minimum slab order */
-	cache->order = (cache->size-1) >> PAGE_WIDTH;
+	pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
+	cache->order = fnzb(pages);
 
 	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
 		cache->order += 1;
 	}
 	cache->objects = comp_objects(cache);
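
To see what the new minimum-order computation yields, assume fnzb(x)
returns the index of the most significant set bit (floor(log2 x)) and that
cache->order is a buddy order, i.e. a slab spans 1 << order pages; both
are inferences from this file rather than statements in the hunk:

	/* Sketch, with 4 KiB pages (PAGE_WIDTH == 12):
	 *   size = 100      -> pages = 1 -> order = fnzb(1) = 0  (1 page)
	 *   size = 4097     -> pages = 2 -> order = fnzb(2) = 1  (2 pages)
	 *   size = 5 * 4096 -> pages = 5 -> order = fnzb(5) = 2  (4 pages)
	 * The last slab starts too small to hold one object; the badness()
	 * loop that follows grows the order until the object fits. The old
	 * expression (cache->size-1) >> PAGE_WIDTH computed pages-1 but was
	 * used as an order, e.g. order 4 (16 pages) for a 5-page object.
	 */
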
@@ -630,12 +641,12 @@
 	ipl_t ipl;
 	void *result = NULL;
 
 	/* Disable interrupts to avoid deadlocks with interrupt handlers */
 	ipl = interrupts_disable();
 
-	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
+	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE) && CPU)
 		result = magazine_obj_get(cache);
 
 	if (!result) {
 		spinlock_lock(&cache->lock);
 		result = slab_obj_create(cache, flags);
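
The new && CPU test matters during early boot: magazine_obj_get() works on
per-processor magazine state, and the slab allocator is brought up before
the per-CPU structures exist, so CPU can still be NULL here (an inference
from the guard itself; the boot sequence is outside this diff). Until CPU
is valid, every allocation falls through to the spinlocked slab path:

	/* Sketch of the ordering the guard tolerates (boot order assumed):
	 *   slab_cache_init();    caches usable, CPU == NULL
	 *   slab_alloc(c, 0);     '&& CPU' fails -> locked slow path
	 *   ...per-CPU init...    CPU becomes non-NULL
	 *   slab_alloc(c, 0);     magazine fast path from here on
	 */
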
@@ -648,28 +659,35 @@
 	atomic_inc(&cache->allocated_objs);
 
 	return result;
 }
 
-/** Return object to cache */
-void slab_free(slab_cache_t *cache, void *obj)
+/** Return object to cache, use slab if known */
+static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
 {
 	ipl_t ipl;
 
 	ipl = interrupts_disable();
 
 	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
+	    || !CPU \
 	    || magazine_obj_put(cache, obj)) {
 
 		spinlock_lock(&cache->lock);
-		slab_obj_destroy(cache, obj, NULL);
+		slab_obj_destroy(cache, obj, slab);
 		spinlock_unlock(&cache->lock);
 	}
 	interrupts_restore(ipl);
 	atomic_dec(&cache->allocated_objs);
 }
 
+/** Return slab object to cache */
+void slab_free(slab_cache_t *cache, void *obj)
+{
+	_slab_free(cache,obj,NULL);
+}
+
 /* Go through all caches and reclaim what is possible */
 count_t slab_reclaim(int flags)
 {
 	slab_cache_t *cache;
 	link_t *cur;
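
The split leaves slab_free() with its public signature while internal
callers that already know the owning slab can pass it through and skip the
lookup that slab_obj_destroy() would otherwise perform:

	/* The two entry points after this change:
	 *   slab_free(cache, obj);         external callers, slab unknown
	 *   _slab_free(cache, obj, slab);  slab already resolved, as in
	 *                                  kfree() added further down
	 */
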
@@ -708,10 +726,12 @@
 	spinlock_unlock(&slab_cache_lock);
 }
 
 void slab_cache_init(void)
 {
+	int i, size;
+
 	/* Initialize magazine cache */
 	_slab_cache_create(&mag_cache,
 			   "slab_magazine",
 			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
 			   sizeof(__address),
@@ -729,6 +749,37 @@
 			   sizeof(slab_t),
 			   0, NULL, NULL,
 			   SLAB_CACHE_SLINSIDE);
 
 	/* Initialize structures for malloc */
+	for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
+	     i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
+	     i++, size <<= 1) {
+		malloc_caches[i] = slab_cache_create(malloc_names[i],
+						     size, 0,
+						     NULL,NULL,0);
+	}
+}
+
+/**************************************/
+/* kalloc/kfree functions             */
+void * kalloc(unsigned int size, int flags)
+{
+	int idx;
+
+	ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W));
+
+	if (size < (1 << SLAB_MIN_MALLOC_W))
+		size = (1 << SLAB_MIN_MALLOC_W);
+
+	idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;
+
+	return slab_alloc(malloc_caches[idx], flags);
+}
+
+
+void kfree(void *obj)
+{
+	slab_t *slab = obj2slab(obj);
+
+	_slab_free(slab->cache, obj, slab);
 }
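
kalloc() rounds a request up to the next power-of-two cache. Assuming
fnzb(x) == floor(log2 x) and SLAB_MIN_MALLOC_W == 3 (inferred from the
malloc-8 name above), the index arithmetic works out as follows:

	/* Sketch of the index computation (constants assumed as above):
	 *   size =   8 -> fnzb(7)   = 2 -> idx = 2 - 3 + 1 = 0  (malloc-8)
	 *   size =   9 -> fnzb(8)   = 3 -> idx = 1              (malloc-16)
	 *   size = 100 -> fnzb(99)  = 6 -> idx = 4              (malloc-128)
	 *   size = 128 -> fnzb(127) = 6 -> idx = 4              (malloc-128)
	 * fnzb(size-1) + 1 is ceil(log2 size) for size > 1, rebased so the
	 * smallest class lands on index 0.
	 *
	 * Hypothetical usage (flags are whatever slab_alloc() accepts):
	 *   char *buf = kalloc(100, 0);  // served by the malloc-128 cache
	 *   kfree(buf);                  // obj2slab() recovers the slab and,
	 *                                // through it, the owning cache
	 */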