Subversion Repositories HelenOS

Diff between Rev 3022 and Rev 4055 (kernel slab allocator), shown as a unified diff:
@@ -171,11 +171,11 @@
 {
     void *data;
     slab_t *slab;
     size_t fsize;
     unsigned int i;
-    unsigned int zone = 0;
+    count_t zone = 0;
     
     data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
     if (!data) {
         return NULL;
     }
@@ -237,12 +237,11 @@
  *
  * @param slab If the caller knows directly slab of the object, otherwise NULL
  *
  * @return Number of freed pages
  */
-static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
-                slab_t *slab)
+static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
 {
     int freed = 0;
 
     if (!slab)
         slab = obj2slab(obj);
@@ -299,11 +298,12 @@
         slab = slab_space_alloc(cache, flags);
         if (!slab)
             return NULL;
         spinlock_lock(&cache->slablock);
     } else {
-        slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
+        slab = list_get_instance(cache->partial_slabs.next, slab_t,
+            link);
         list_remove(&slab->link);
     }
     obj = slab->start + slab->nextavail * cache->size;
     slab->nextavail = *((int *)obj);
     slab->available--;
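The three context lines at the end of this hunk are the heart of the allocator's free-list scheme: each free slot stores the index of the next free slot in its own first bytes, so a slab needs no separate bookkeeping array. A minimal stand-alone sketch of the same technique (the names SLOT_SIZE, NSLOTS, freelist_init and freelist_pop are illustrative, not taken from the HelenOS source):

    #include <stddef.h>

    #define SLOT_SIZE 64    /* assumed object size (cache->size analogue) */
    #define NSLOTS    16    /* assumed slots per slab */

    /* Chain the slots: each free slot's first int holds the next free index. */
    static void freelist_init(char *start)
    {
        size_t i;
        for (i = 0; i < NSLOTS; i++)
            *((int *) (start + i * SLOT_SIZE)) = (int) i + 1;
    }

    /* Pop the slot at index *nextavail and advance the chain head,
     * mirroring slab->nextavail = *((int *) obj) above. */
    static void *freelist_pop(char *start, int *nextavail)
    {
        void *obj = start + *nextavail * SLOT_SIZE;
        *nextavail = *((int *) obj);
        return obj;
    }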
@@ -330,12 +330,11 @@
  * Finds a full magazine in cache, takes it from list
  * and returns it
  *
  * @param first If true, return first, else last mag
  */
-static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
-                        int first)
+static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
 {
     slab_magazine_t *mag = NULL;
     link_t *cur;
 
     spinlock_lock(&cache->maglock);
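The first parameter selects which end of the magazine list the cache gives up. With the circular, doubly-linked link_t lists used throughout this file, that is just a choice between list.next and list.prev; a hedged sketch (maglist stands in for the actual list-head field, which this hunk does not show):

    /* Detach a magazine from either end of a circular doubly-linked list.
     * Sketch only; relies on the list_empty()/list_remove()/
     * list_get_instance() semantics visible elsewhere in this diff. */
    static slab_magazine_t *take_mag(link_t *maglist, int first)
    {
        link_t *cur;

        if (list_empty(maglist))
            return NULL;
        cur = first ? maglist->next : maglist->prev;
        list_remove(cur);
        return list_get_instance(cur, slab_magazine_t, link);
    }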
@@ -366,12 +365,11 @@
 /**
  * Free all objects in magazine and free memory associated with magazine
  *
  * @return Number of freed pages
  */
-static count_t magazine_destroy(slab_cache_t *cache,
-                slab_magazine_t *mag)
+static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
 {
     unsigned int i;
     count_t frames = 0;
 
     for (i = 0; i < mag->busy; i++) {
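The loop runs over mag->busy, the count of filled slots. Judging by the allocation size used later in slab_cache_init (sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*)), a magazine is a small header followed by an inline array of object pointers. A hedged sketch of destroying such a structure (the objs field and the callback signatures are assumptions, not the real definitions):

    typedef unsigned long count_t;   /* stand-in for the kernel typedef */

    typedef struct {
        count_t busy;    /* number of filled slots */
        void *objs[];    /* flexible array; SLAB_MAG_SIZE entries at alloc time */
    } mag_sketch_t;

    /* Return every cached object to its slab, free the magazine itself,
     * and report how many frames were released in total. */
    static count_t mag_destroy_sketch(mag_sketch_t *mag,
        count_t (*destroy_obj)(void *), void (*free_mag)(void *))
    {
        count_t i, frames = 0;

        for (i = 0; i < mag->busy; i++)
            frames += destroy_obj(mag->objs[i]);
        free_mag(mag);
        return frames;
    }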
@@ -528,11 +526,12 @@
 
 /** Return number of objects that fit in certain cache size */
 static unsigned int comp_objects(slab_cache_t *cache)
 {
     if (cache->flags & SLAB_CACHE_SLINSIDE)
-        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
+        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
+            cache->size;
     else
         return (PAGE_SIZE << cache->order) / cache->size;
 }
 
 /** Return wasted space in slab */
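Both branches divide the raw slab size (PAGE_SIZE << order) by the object size; the SLAB_CACHE_SLINSIDE variant first subtracts the slab_t control structure that lives inside the slab. A worked example with assumed numbers (PAGE_SIZE 4096, order 0, sizeof(slab_t) 32, object size 64; all four values are illustrative):

    /* SLINSIDE: ((4096 << 0) - 32) / 64 = 4064 / 64 = 63 objects
     * external:  (4096 << 0)       / 64 = 4096 / 64 = 64 objects
     * Keeping slab_t inside the slab costs one 64-byte slot here. */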
@@ -555,32 +554,29 @@
 {
     unsigned int i;
     
     ASSERT(_slab_initialized >= 2);
 
-    cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
+    cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
+        0);
     for (i = 0; i < config.cpu_count; i++) {
-        memsetb((uintptr_t)&cache->mag_cache[i],
-            sizeof(cache->mag_cache[i]), 0);
-        spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
+        memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
+        spinlock_initialize(&cache->mag_cache[i].lock,
+            "slab_maglock_cpu");
     }
 }
 
 /** Initialize allocated memory as a slab cache */
 static void
-_slab_cache_create(slab_cache_t *cache,
-           char *name,
-           size_t size,
-           size_t align,
-           int (*constructor)(void *obj, int kmflag),
-           int (*destructor)(void *obj),
-           int flags)
+_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
+    int flags)
 {
     int pages;
     ipl_t ipl;
 
-    memsetb((uintptr_t)cache, sizeof(*cache), 0);
+    memsetb(cache, sizeof(*cache), 0);
     cache->name = name;
 
     if (align < sizeof(unative_t))
         align = sizeof(unative_t);
     size = ALIGN_UP(size, align);
628
    spinlock_unlock(&slab_cache_lock);
624
    spinlock_unlock(&slab_cache_lock);
629
    interrupts_restore(ipl);
625
    interrupts_restore(ipl);
630
}
626
}
631
 
627
 
632
/** Create slab cache  */
628
/** Create slab cache  */
633
slab_cache_t * slab_cache_create(char *name,
-
 
634
                 size_t size,
629
slab_cache_t *
635
                 size_t align,
630
slab_cache_create(char *name, size_t size, size_t align,
636
                 int (*constructor)(void *obj, int kmflag),
631
    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
637
                 int (*destructor)(void *obj),
-
 
638
                 int flags)
632
    int flags)
639
{
633
{
640
    slab_cache_t *cache;
634
    slab_cache_t *cache;
641
 
635
 
642
    cache = slab_alloc(&slab_cache_cache, 0);
636
    cache = slab_alloc(&slab_cache_cache, 0);
Line 716... Line 710...
716
   
710
   
717
    /* Destroy all magazines */
711
    /* Destroy all magazines */
718
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);
712
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);
719
 
713
 
720
    /* All slabs must be empty */
714
    /* All slabs must be empty */
721
    if (!list_empty(&cache->full_slabs) \
715
    if (!list_empty(&cache->full_slabs) ||
722
        || !list_empty(&cache->partial_slabs))
716
        !list_empty(&cache->partial_slabs))
723
        panic("Destroying cache that is not empty.");
717
        panic("Destroying cache that is not empty.");
724
 
718
 
725
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
719
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
726
        free(cache->mag_cache);
720
        free(cache->mag_cache);
727
    slab_free(&slab_cache_cache, cache);
721
    slab_free(&slab_cache_cache, cache);
728
}
722
}
729
 
723
 
730
/** Allocate new object from cache - if no flags given, always returns
724
/** Allocate new object from cache - if no flags given, always returns memory */
731
    memory */
-
 
732
void * slab_alloc(slab_cache_t *cache, int flags)
725
void *slab_alloc(slab_cache_t *cache, int flags)
733
{
726
{
734
    ipl_t ipl;
727
    ipl_t ipl;
735
    void *result = NULL;
728
    void *result = NULL;
736
   
729
   
Line 756... Line 749...
756
{
749
{
757
    ipl_t ipl;
750
    ipl_t ipl;
758
 
751
 
759
    ipl = interrupts_disable();
752
    ipl = interrupts_disable();
760
 
753
 
761
    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
754
    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
762
        || magazine_obj_put(cache, obj)) {
755
        magazine_obj_put(cache, obj)) {
763
 
-
 
764
        slab_obj_destroy(cache, obj, slab);
756
        slab_obj_destroy(cache, obj, slab);
765
 
757
 
766
    }
758
    }
767
    interrupts_restore(ipl);
759
    interrupts_restore(ipl);
768
    atomic_dec(&cache->allocated_objs);
760
    atomic_dec(&cache->allocated_objs);
@@ -785,11 +777,12 @@
 
     /* TODO: Add assert, that interrupts are disabled, otherwise
      * memory allocation from interrupts can deadlock.
      */
 
-    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
+    for (cur = slab_cache_list.next; cur != &slab_cache_list;
+        cur = cur->next) {
         cache = list_get_instance(cur, slab_cache_t, link);
         frames += _slab_reclaim(cache, flags);
     }
 
     spinlock_unlock(&slab_cache_lock);
799
 
792
 
800
 
793
 
801
/* Print list of slabs */
794
/* Print list of slabs */
802
void slab_print_list(void)
795
void slab_print_list(void)
803
{
796
{
-
 
797
    int skip = 0;
-
 
798
 
-
 
799
    printf("slab name        size     pages  obj/pg slabs  cached allocated"
-
 
800
        " ctl\n");
-
 
801
    printf("---------------- -------- ------ ------ ------ ------ ---------"
-
 
802
        " ---\n");
-
 
803
 
-
 
804
    while (true) {
804
    slab_cache_t *cache;
805
        slab_cache_t *cache;
805
    link_t *cur;
806
        link_t *cur;
806
    ipl_t ipl;
807
        ipl_t ipl;
-
 
808
        int i;
-
 
809
 
-
 
810
        /*
-
 
811
         * We must not hold the slab_cache_lock spinlock when printing
-
 
812
         * the statistics. Otherwise we can easily deadlock if the print
-
 
813
         * needs to allocate memory.
-
 
814
         *
-
 
815
         * Therefore, we walk through the slab cache list, skipping some
-
 
816
         * amount of already processed caches during each iteration and
-
 
817
         * gathering statistics about the first unprocessed cache. For
-
 
818
         * the sake of printing the statistics, we realese the
-
 
819
         * slab_cache_lock and reacquire it afterwards. Then the walk
-
 
820
         * starts again.
-
 
821
         *
-
 
822
         * This limits both the efficiency and also accuracy of the
-
 
823
         * obtained statistics. The efficiency is decreased because the
-
 
824
         * time complexity of the algorithm is quadratic instead of
-
 
825
         * linear. The accuracy is impacted because we drop the lock
-
 
826
         * after processing one cache. If there is someone else
-
 
827
         * manipulating the cache list, we might omit an arbitrary
-
 
828
         * number of caches or process one cache multiple times.
-
 
829
         * However, we don't bleed for this algorithm for it is only
-
 
830
         * statistics.
-
 
831
         */
807
   
832
 
808
    ipl = interrupts_disable();
833
        ipl = interrupts_disable();
809
    spinlock_lock(&slab_cache_lock);
834
        spinlock_lock(&slab_cache_lock);
810
    printf("slab name        size     pages  obj/pg slabs  cached allocated ctl\n");
-
 
811
    printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");
-
 
812
   
835
 
813
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
836
        for (i = 0, cur = slab_cache_list.next;
814
        cache = list_get_instance(cur, slab_cache_t, link);
837
            i < skip && cur != &slab_cache_list;
-
 
838
            i++, cur = cur->next)
-
 
839
            ;
815
       
840
 
-
 
841
        if (cur == &slab_cache_list) {
816
        printf("%-16s %8zd %6zd %6zd %6zd %6zd %9zd %-3s\n", cache->name, cache->size, (1 << cache->order), cache->objects, atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs), atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
842
            spinlock_unlock(&slab_cache_lock);
-
 
843
            interrupts_restore(ipl);
-
 
844
            break;
817
    }
845
        }
-
 
846
 
-
 
847
        skip++;
-
 
848
 
-
 
849
        cache = list_get_instance(cur, slab_cache_t, link);
-
 
850
 
-
 
851
        char *name = cache->name;
-
 
852
        uint8_t order = cache->order;
-
 
853
        size_t size = cache->size;
-
 
854
        unsigned int objects = cache->objects;
-
 
855
        long allocated_slabs = atomic_get(&cache->allocated_slabs);
-
 
856
        long cached_objs = atomic_get(&cache->cached_objs);
-
 
857
        long allocated_objs = atomic_get(&cache->allocated_objs);
-
 
858
        int flags = cache->flags;
-
 
859
       
818
    spinlock_unlock(&slab_cache_lock);
860
        spinlock_unlock(&slab_cache_lock);
819
    interrupts_restore(ipl);
861
        interrupts_restore(ipl);
-
 
862
       
-
 
863
        printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
-
 
864
            name, size, (1 << order), objects, allocated_slabs,
-
 
865
            cached_objs, allocated_objs,
-
 
866
            flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
-
 
867
    }
820
}
868
}
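The comment added in Rev 4055 describes a skip-counting walk: every iteration retakes the lock, skips the caches already printed, snapshots one cache, then drops the lock before calling printf(). The same pattern reduced to a self-contained sketch (the node table and lock stubs are hypothetical stand-ins for the cache list and slab_cache_lock above):

    #include <stdio.h>

    /* Toy stand-ins for the kernel pieces. */
    struct node { const char *name; int objs; };
    static struct node nodes[] = { { "a", 1 }, { "b", 2 }, { "c", 3 } };
    static const int nnodes = 3;

    static void lock_list(void)   { /* spinlock_lock(&slab_cache_lock) */ }
    static void unlock_list(void) { /* spinlock_unlock(&slab_cache_lock) */ }

    /* Print one entry per lock acquisition: skip the entries already
     * printed, snapshot the next one, drop the lock, then print. The
     * walk is O(n^2), but printf() never runs under the lock. */
    void print_all(void)
    {
        int skip = 0;

        while (1) {
            struct node snapshot;

            lock_list();
            if (skip >= nnodes) {        /* walked past the end: done */
                unlock_list();
                return;
            }
            snapshot = nodes[skip++];    /* copy while still locked */
            unlock_list();

            printf("%s %d\n", snapshot.name, snapshot.objs);
        }
    }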
 
 void slab_cache_init(void)
 {
     int i, size;
 
     /* Initialize magazine cache */
-    _slab_cache_create(&mag_cache,
-               "slab_magazine",
-               sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
-               sizeof(uintptr_t),
-               NULL, NULL,
-               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+    _slab_cache_create(&mag_cache, "slab_magazine",
+        sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
+        sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
+        SLAB_CACHE_SLINSIDE);
     /* Initialize slab_cache cache */
-    _slab_cache_create(&slab_cache_cache,
-               "slab_cache",
-               sizeof(slab_cache_cache),
-               sizeof(uintptr_t),
-               NULL, NULL,
-               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+    _slab_cache_create(&slab_cache_cache, "slab_cache",
+        sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
+        SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
     /* Initialize external slab cache */
-    slab_extern_cache = slab_cache_create("slab_extern",
-                          sizeof(slab_t),
-                          0, NULL, NULL,
-                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
+    slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
+        NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
     /* Initialize structures for malloc */
-    for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
-         i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
-         i++, size <<= 1) {
-        malloc_caches[i] = slab_cache_create(malloc_names[i],
-                             size, 0,
-                             NULL,NULL, SLAB_CACHE_MAGDEFERRED);
+    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
+        i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
+        i++, size <<= 1) {
+        malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
+            NULL, NULL, SLAB_CACHE_MAGDEFERRED);
     }
 #ifdef CONFIG_DEBUG       
     _slab_initialized = 1;
 #endif
872
    _slab_initialized = 2;
912
    _slab_initialized = 2;
873
#endif
913
#endif
874
 
914
 
875
    spinlock_lock(&slab_cache_lock);
915
    spinlock_lock(&slab_cache_lock);
876
   
916
   
877
    for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
917
    for (cur = slab_cache_list.next; cur != &slab_cache_list;
-
 
918
        cur = cur->next){
878
        s = list_get_instance(cur, slab_cache_t, link);
919
        s = list_get_instance(cur, slab_cache_t, link);
879
        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
920
        if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
-
 
921
            SLAB_CACHE_MAGDEFERRED)
880
            continue;
922
            continue;
881
        make_magcache(s);
923
        make_magcache(s);
882
        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
924
        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
883
    }
925
    }
884
 
926