Subversion Repositories HelenOS

Comparing Rev 3153 ("-" lines) with Rev 3191 ("+" lines)
@@ -165 +165 @@
 
 /**
  * Allocate frames for slab space and initialize
  *
  */
-static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
+static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
 {
     void *data;
     slab_t *slab;
     size_t fsize;
     unsigned int i;
@@ -177 +177 @@
 
     data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
     if (!data) {
         return NULL;
     }
-    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
+    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
         slab = slab_alloc(slab_extern_cache, flags);
         if (!slab) {
             frame_free(KA2PA(data));
             return NULL;
         }
@@ -198 +198 @@
     slab->available = cache->objects;
     slab->nextavail = 0;
     slab->cache = cache;
 
     for (i = 0; i < cache->objects; i++)
-        *((int *) (slab->start + i*cache->size)) = i+1;
+        *((int *) (slab->start + i*cache->size)) = i + 1;
 
     atomic_inc(&cache->allocated_slabs);
     return slab;
 }
 
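The loop in this hunk threads a free list through the slab's own storage: the first word of every free object holds the index of the next free object, so free objects need no external metadata. A minimal user-space sketch of the same scheme (OBJ_SIZE, OBJ_COUNT and the static buffer are illustrative stand-ins, not HelenOS code):

    #include <stdio.h>

    #define OBJ_SIZE  64    /* illustrative object size */
    #define OBJ_COUNT 8     /* illustrative objects per slab */

    static char slab_start[OBJ_SIZE * OBJ_COUNT];  /* stands in for slab->start */
    static int nextavail = 0;                      /* index of first free object */

    int main(void)
    {
        int i;

        /* Chain each object to its successor, as slab_space_alloc() does. */
        for (i = 0; i < OBJ_COUNT; i++)
            *((int *) (slab_start + i * OBJ_SIZE)) = i + 1;

        /* Pop two objects the way slab_obj_create() does (see below). */
        for (i = 0; i < 2; i++) {
            void *obj = slab_start + nextavail * OBJ_SIZE;
            nextavail = *((int *) obj);
            printf("alloc %d: offset %ld, next free index %d\n",
                i, (long) ((char *) obj - slab_start), nextavail);
        }
        return 0;
    }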
@@ -237 +237 @@
  *
  * @param slab If the caller knows directly slab of the object, otherwise NULL
  *
  * @return Number of freed pages
  */
-static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
-                slab_t *slab)
+static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
 {
     int freed = 0;
 
     if (!slab)
         slab = obj2slab(obj);
@@ -254 +253 @@
 
     spinlock_lock(&cache->slablock);
     ASSERT(slab->available < cache->objects);
 
     *((int *)obj) = slab->nextavail;
-    slab->nextavail = (obj - slab->start)/cache->size;
+    slab->nextavail = (obj - slab->start) / cache->size;
     slab->available++;
 
     /* Move it to correct list */
     if (slab->available == cache->objects) {
         /* Free associated memory */
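The two stores in this hunk are the matching LIFO push: the freed object's first word is overwritten with the current head index, and the object's own index, recomputed from its offset inside the slab, becomes the new head. Continuing the user-space sketch above (same illustrative slab_start, nextavail and OBJ_SIZE):

    /* Push an object back onto the embedded free list, mirroring the two
     * assignments in slab_obj_destroy() (sketch, not kernel code). */
    static void sketch_free(void *obj)
    {
        *((int *) obj) = nextavail;    /* link object to the old head */
        nextavail = (int) (((char *) obj - slab_start) / OBJ_SIZE);
    }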
@@ -279 +278 @@
 /**
  * Take new object from slab or create new if needed
  *
  * @return Object address or null
  */
-static void * slab_obj_create(slab_cache_t *cache, int flags)
+static void *slab_obj_create(slab_cache_t *cache, int flags)
 {
     slab_t *slab;
     void *obj;
 
     spinlock_lock(&cache->slablock);
@@ -299 +298 @@
         slab = slab_space_alloc(cache, flags);
         if (!slab)
             return NULL;
         spinlock_lock(&cache->slablock);
     } else {
-        slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
+        slab = list_get_instance(cache->partial_slabs.next, slab_t,
+            link);
         list_remove(&slab->link);
     }
     obj = slab->start + slab->nextavail * cache->size;
     slab->nextavail = *((int *)obj);
     slab->available--;
@@ -330 +330 @@
  * Finds a full magazine in cache, takes it from list
  * and returns it
  *
  * @param first If true, return first, else last mag
  */
-static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
-                        int first)
+static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
 {
     slab_magazine_t *mag = NULL;
     link_t *cur;
 
     spinlock_lock(&cache->maglock);
@@ -366 +365 @@
 /**
  * Free all objects in magazine and free memory associated with magazine
  *
  * @return Number of freed pages
  */
-static count_t magazine_destroy(slab_cache_t *cache,
-                slab_magazine_t *mag)
+static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
 {
     unsigned int i;
     count_t frames = 0;
 
     for (i = 0; i < mag->busy; i++) {
@@ -387 +385 @@
 /**
  * Find full magazine, set it as current and return it
  *
  * Assume cpu_magazine lock is held
  */
-static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
+static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
 {
     slab_magazine_t *cmag, *lastmag, *newmag;
 
     cmag = cache->mag_cache[CPU->id].current;
     lastmag = cache->mag_cache[CPU->id].last;
@@ -421 +419 @@
 /**
  * Try to find object in CPU-cache magazines
  *
  * @return Pointer to object or NULL if not available
  */
-static void * magazine_obj_get(slab_cache_t *cache)
+static void *magazine_obj_get(slab_cache_t *cache)
 {
     slab_magazine_t *mag;
     void *obj;
 
     if (!CPU)
@@ -456 +454 @@
  *  If full, try the last.
  *   If full, put to magazines list.
  *   allocate new, exchange last & current
  *
  */
-static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
+static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
 {
     slab_magazine_t *cmag,*lastmag,*newmag;
 
     cmag = cache->mag_cache[CPU->id].current;
     lastmag = cache->mag_cache[CPU->id].last;
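The magazine routines above form the per-CPU front end of a Bonwick-style magazine layer: each CPU keeps a current and a last magazine, and when current runs empty (or, on the free path, full) the two are exchanged before falling back to the shared magazines list. A standalone sketch of the exchange on the allocation path (the type, capacity and names are illustrative; locking and the shared-depot fallback are omitted):

    #include <stddef.h>

    typedef struct {
        int busy;          /* objects currently stored */
        void *objs[8];     /* illustrative capacity */
    } mag_sketch_t;

    static mag_sketch_t *current_mag, *last_mag;

    /* Take an object from the CPU's magazines, swapping current and last
     * when current is empty, as get_full_current_mag() does above. */
    static void *mag_get_sketch(void)
    {
        if (current_mag->busy == 0 && last_mag->busy > 0) {
            mag_sketch_t *tmp = current_mag;   /* exchange the two magazines */
            current_mag = last_mag;
            last_mag = tmp;
        }
        if (current_mag->busy == 0)
            return NULL;   /* caller falls through to the slab layer */
        return current_mag->objs[--current_mag->busy];
    }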
@@ -528 +526 @@
 
 /** Return number of objects that fit in certain cache size */
 static unsigned int comp_objects(slab_cache_t *cache)
 {
     if (cache->flags & SLAB_CACHE_SLINSIDE)
-        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
+        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
+            cache->size;
     else
         return (PAGE_SIZE << cache->order) / cache->size;
 }
 
 /** Return wasted space in slab */
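As a worked example for comp_objects(): assuming PAGE_SIZE is 4096 and a cache with order 0 and size 256 (all illustrative values), the external-header case yields 4096 / 256 = 16 objects per slab, while with SLAB_CACHE_SLINSIDE the slab_t header is carved out of the same page, so only (4096 - sizeof(slab_t)) / 256 objects fit, i.e. 15 if sizeof(slab_t) were, say, 64 bytes.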
@@ -555 +554 @@
 {
     unsigned int i;
 
     ASSERT(_slab_initialized >= 2);
 
-    cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,0);
+    cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
+        0);
     for (i = 0; i < config.cpu_count; i++) {
         memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
-        spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
+        spinlock_initialize(&cache->mag_cache[i].lock,
+            "slab_maglock_cpu");
     }
 }
 
 /** Initialize allocated memory as a slab cache */
 static void
-_slab_cache_create(slab_cache_t *cache,
-           char *name,
-           size_t size,
-           size_t align,
-           int (*constructor)(void *obj, int kmflag),
-           int (*destructor)(void *obj),
-           int flags)
+_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
+    int flags)
 {
     int pages;
     ipl_t ipl;
 
     memsetb(cache, sizeof(*cache), 0);
@@ -593 +590 @@
     list_initialize(&cache->full_slabs);
     list_initialize(&cache->partial_slabs);
     list_initialize(&cache->magazines);
     spinlock_initialize(&cache->slablock, "slab_lock");
     spinlock_initialize(&cache->maglock, "slab_maglock");
-    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
+    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         make_magcache(cache);
 
     /* Compute slab sizes, object counts in slabs etc. */
     if (cache->size < SLAB_INSIDE_SIZE)
         cache->flags |= SLAB_CACHE_SLINSIDE;
@@ -606 +603 @@
     pages = SIZE2FRAMES(cache->size);
     /* We need the 2^order >= pages */
     if (pages == 1)
         cache->order = 0;
     else
-        cache->order = fnzb(pages-1)+1;
+        cache->order = fnzb(pages - 1) + 1;
 
     while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
         cache->order += 1;
     }
     cache->objects = comp_objects(cache);
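Assuming fnzb() returns the index of the most significant set bit (i.e. floor(log2)), the expression fnzb(pages - 1) + 1 computes ceil(log2(pages)) for pages > 1: for example, pages = 5 gives fnzb(4) = 2 and thus order 3, an 8-frame slab, the smallest power of two covering 5 frames. The badness() loop that follows may still raise the order further when the resulting slab would waste too much space.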
@@ -627 +624 @@
     spinlock_unlock(&slab_cache_lock);
     interrupts_restore(ipl);
 }
 
 /** Create slab cache  */
-slab_cache_t * slab_cache_create(char *name,
-                 size_t size,
-                 size_t align,
-                 int (*constructor)(void *obj, int kmflag),
-                 int (*destructor)(void *obj),
-                 int flags)
+slab_cache_t *
+slab_cache_create(char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
+    int flags)
 {
     slab_cache_t *cache;
 
     cache = slab_alloc(&slab_cache_cache, 0);
     _slab_cache_create(cache, name, size, align, constructor, destructor,
-               flags);
+        flags);
     return cache;
 }
 
 /**
  * Reclaim space occupied by objects that are already free
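For reference, a typical use of this constructor, mirroring the slab_extern cache set up in slab_cache_init() further below, paired with the matching slab_alloc()/slab_free() calls (a kernel-context fragment, not a standalone program):

    /* Create a cache of slab_t descriptors with no constructor/destructor;
     * flags 0 in slab_alloc() requests an ordinary blocking allocation. */
    slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
        NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

    slab_t *s = slab_alloc(slab_extern_cache, 0);
    /* ... use s ... */
    slab_free(slab_extern_cache, s);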
@@ -662 +657 @@
 
     /* We count up to original magazine count to avoid
      * endless loop
      */
     magcount = atomic_get(&cache->magazine_counter);
-    while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
+    while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
         frames += magazine_destroy(cache,mag);
         if (!(flags & SLAB_RECLAIM_ALL) && frames)
             break;
     }
 
@@ -715 +710 @@
 
     /* Destroy all magazines */
     _slab_reclaim(cache, SLAB_RECLAIM_ALL);
 
     /* All slabs must be empty */
-    if (!list_empty(&cache->full_slabs) \
-        || !list_empty(&cache->partial_slabs))
+    if (!list_empty(&cache->full_slabs) ||
+        !list_empty(&cache->partial_slabs))
         panic("Destroying cache that is not empty.");
 
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         free(cache->mag_cache);
     slab_free(&slab_cache_cache, cache);
 }
 
-/** Allocate new object from cache - if no flags given, always returns
-    memory */
-void * slab_alloc(slab_cache_t *cache, int flags)
+/** Allocate new object from cache - if no flags given, always returns memory */
+void *slab_alloc(slab_cache_t *cache, int flags)
 {
     ipl_t ipl;
     void *result = NULL;
 
     /* Disable interrupts to avoid deadlocks with interrupt handlers */
@@ -755 +749 @@
 {
     ipl_t ipl;
 
     ipl = interrupts_disable();
 
-    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
-        || magazine_obj_put(cache, obj)) {
-
+    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
+        magazine_obj_put(cache, obj)) {
         slab_obj_destroy(cache, obj, slab);
 
     }
     interrupts_restore(ipl);
     atomic_dec(&cache->allocated_objs);
@@ -784 +777 @@
 
     /* TODO: Add assert, that interrupts are disabled, otherwise
      * memory allocation from interrupts can deadlock.
      */
 
-    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
+    for (cur = slab_cache_list.next; cur != &slab_cache_list;
+        cur = cur->next) {
         cache = list_get_instance(cur, slab_cache_t, link);
         frames += _slab_reclaim(cache, flags);
     }
 
     spinlock_unlock(&slab_cache_lock);
@@ -798 +792 @@
 
 
 /* Print list of slabs */
 void slab_print_list(void)
 {
-    slab_cache_t *cache;
-    link_t *cur;
-    ipl_t ipl;
-    
-    ipl = interrupts_disable();
-    spinlock_lock(&slab_cache_lock);
-    printf("slab name        size     pages  obj/pg slabs  cached allocated ctl\n");
-    printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");
-    
-    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
-        cache = list_get_instance(cur, slab_cache_t, link);
-        
-        printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
-            cache->name, cache->size, (1 << cache->order), cache->objects,
-            atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs),
-            atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
-    }
-    spinlock_unlock(&slab_cache_lock);
-    interrupts_restore(ipl);
+    int skip = 0;
+
+    printf("slab name        size     pages  obj/pg slabs  cached allocated"
+        " ctl\n");
+    printf("---------------- -------- ------ ------ ------ ------ ---------"
+        " ---\n");
+
+    while (true) {
+        slab_cache_t *cache;
+        link_t *cur;
+        ipl_t ipl;
+        int i;
+
+        /*
+         * We must not hold the slab_cache_lock spinlock when printing
+         * the statistics. Otherwise we can easily deadlock if the print
+         * needs to allocate memory.
+         *
+         * Therefore, we walk through the slab cache list, skipping some
+         * amount of already processed caches during each iteration and
+         * gathering statistics about the first unprocessed cache. For
+         * the sake of printing the statistics, we release the
+         * slab_cache_lock and reacquire it afterwards. Then the walk
+         * starts again.
+         *
+         * This limits both the efficiency and also accuracy of the
+         * obtained statistics. The efficiency is decreased because the
+         * time complexity of the algorithm is quadratic instead of
+         * linear. The accuracy is impacted because we drop the lock
+         * after processing one cache. If there is someone else
+         * manipulating the cache list, we might omit an arbitrary
+         * number of caches or process one cache multiple times.
+         * However, we don't bleed for this algorithm for it is only
+         * statistics.
+         */
+
+        ipl = interrupts_disable();
+        spinlock_lock(&slab_cache_lock);
+
+        for (i = 0, cur = slab_cache_list.next;
+            i < skip && cur != &slab_cache_list;
+            i++, cur = cur->next)
+            ;
+
+        if (cur == &slab_cache_list) {
+            spinlock_unlock(&slab_cache_lock);
+            interrupts_restore(ipl);
+            break;
+        }
+
+        skip++;
+
+        cache = list_get_instance(cur, slab_cache_t, link);
+
+        char *name = cache->name;
+        uint8_t order = cache->order;
+        size_t size = cache->size;
+        unsigned int objects = cache->objects;
+        long allocated_slabs = atomic_get(&cache->allocated_slabs);
+        long cached_objs = atomic_get(&cache->cached_objs);
+        long allocated_objs = atomic_get(&cache->allocated_objs);
+        int flags = cache->flags;
+
+        spinlock_unlock(&slab_cache_lock);
+        interrupts_restore(ipl);
+
+        printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
+            name, size, (1 << order), objects, allocated_slabs,
+            cached_objs, allocated_objs,
+            flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
+    }
 }
 
 void slab_cache_init(void)
 {
     int i, size;
 
     /* Initialize magazine cache */
-    _slab_cache_create(&mag_cache,
-               "slab_magazine",
-               sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
-               sizeof(uintptr_t),
-               NULL, NULL,
-               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+    _slab_cache_create(&mag_cache, "slab_magazine",
+        sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
+        sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
+        SLAB_CACHE_SLINSIDE);
     /* Initialize slab_cache cache */
-    _slab_cache_create(&slab_cache_cache,
-               "slab_cache",
-               sizeof(slab_cache_cache),
-               sizeof(uintptr_t),
-               NULL, NULL,
-               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+    _slab_cache_create(&slab_cache_cache, "slab_cache",
+        sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
+        SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
     /* Initialize external slab cache */
-    slab_extern_cache = slab_cache_create("slab_extern",
-                          sizeof(slab_t),
-                          0, NULL, NULL,
-                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
+    slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
+        NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
     /* Initialize structures for malloc */
-    for (i=0, size=(1 << SLAB_MIN_MALLOC_W);
-         i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
-         i++, size <<= 1) {
-        malloc_caches[i] = slab_cache_create(malloc_names[i],
-                             size, 0,
-                             NULL,NULL, SLAB_CACHE_MAGDEFERRED);
+    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
+        i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
+        i++, size <<= 1) {
+        malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
+            NULL, NULL, SLAB_CACHE_MAGDEFERRED);
     }
 #ifdef CONFIG_DEBUG
     _slab_initialized = 1;
 #endif
 }
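The long comment added in Rev 3191 explains the slab_print_list() rewrite: rather than holding slab_cache_lock across every printf(), each pass re-walks the list from the head, skips the caches already reported (counted by skip), snapshots one cache's counters under the lock, and releases the lock before printing. The pattern in isolation (a standalone sketch with an int array standing in for the cache list):

    #include <stdio.h>

    #define NCACHES 4
    static int stats[NCACHES] = { 10, 20, 30, 40 };

    int main(void)
    {
        int skip = 0;

        while (1) {
            int i, snapshot;

            /* ...the lock would be acquired here... */
            for (i = 0; i < skip && i < NCACHES; i++)
                ;                    /* skip already-printed entries */
            if (i == NCACHES)
                break;               /* ...unlock and finish... */
            snapshot = stats[i];     /* copy stats while "locked" */
            skip++;
            /* ...the lock would be dropped here, making the print safe... */
            printf("cache %d: %d\n", i, snapshot);
        }
        return 0;
    }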
@@ -874 +912 @@
     _slab_initialized = 2;
 #endif
 
     spinlock_lock(&slab_cache_lock);
 
-    for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
+    for (cur = slab_cache_list.next; cur != &slab_cache_list;
+        cur = cur->next){
         s = list_get_instance(cur, slab_cache_t, link);
-        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
+        if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
+            SLAB_CACHE_MAGDEFERRED)
             continue;
         make_magcache(s);
         s->flags &= ~SLAB_CACHE_MAGDEFERRED;
     }
 
     spinlock_unlock(&slab_cache_lock);
 }
 
 /**************************************/
 /* kalloc/kfree functions             */
-void * malloc(unsigned int size, int flags)
+void *malloc(unsigned int size, int flags)
 {
     ASSERT(_slab_initialized);
     ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));
 
     if (size < (1 << SLAB_MIN_MALLOC_W))
@@ -900 +940 @@
     int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
 
     return slab_alloc(malloc_caches[idx], flags);
 }
 
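The index computed in malloc() above rounds the request up to the next power of two. A standalone check, assuming fnzb() is floor(log2) and using an illustrative SLAB_MIN_MALLOC_W of 4 (i.e. a smallest cache of 16-byte objects):

    #include <stdio.h>

    #define SLAB_MIN_MALLOC_W 4   /* illustrative, log2 of the smallest cache */

    /* Index of the most significant set bit, standing in for fnzb(). */
    static int fnzb(unsigned int v)
    {
        int b = -1;
        while (v) {
            b++;
            v >>= 1;
        }
        return b;
    }

    int main(void)
    {
        unsigned int size;

        for (size = 16; size <= 64; size += 17) {
            int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
            printf("size %3u -> cache index %d (%d-byte objects)\n",
                size, idx, 1 << (idx + SLAB_MIN_MALLOC_W));
        }
        return 0;
    }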
-void * realloc(void *ptr, unsigned int size, int flags)
+void *realloc(void *ptr, unsigned int size, int flags)
 {
     ASSERT(_slab_initialized);
     ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
 
     void *new_ptr;