Subversion Repositories HelenOS


Rev 3104 → Rev 3180
@@ -165,11 +165,11 @@
 
 /**
  * Allocate frames for slab space and initialize
  *
  */
-static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
+static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
 {
     void *data;
     slab_t *slab;
     size_t fsize;
     unsigned int i;
@@ -177,11 +177,11 @@
 
     data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
     if (!data) {
         return NULL;
     }
-    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
+    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
         slab = slab_alloc(slab_extern_cache, flags);
         if (!slab) {
             frame_free(KA2PA(data));
             return NULL;
         }
@@ -198,11 +198,11 @@
     slab->available = cache->objects;
     slab->nextavail = 0;
     slab->cache = cache;
 
     for (i = 0; i < cache->objects; i++)
-        *((int *) (slab->start + i*cache->size)) = i+1;
+        *((int *) (slab->start + i*cache->size)) = i + 1;
 
     atomic_inc(&cache->allocated_slabs);
     return slab;
 }
 
@@ -237,12 +237,11 @@
  *
  * @param slab If the caller knows directly slab of the object, otherwise NULL
  *
  * @return Number of freed pages
  */
-static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
-                slab_t *slab)
+static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
 {
     int freed = 0;
 
     if (!slab)
         slab = obj2slab(obj);
@@ -254,11 +253,11 @@
 
     spinlock_lock(&cache->slablock);
     ASSERT(slab->available < cache->objects);
 
     *((int *)obj) = slab->nextavail;
-    slab->nextavail = (obj - slab->start)/cache->size;
+    slab->nextavail = (obj - slab->start) / cache->size;
     slab->available++;
 
     /* Move it to correct list */
     if (slab->available == cache->objects) {
         /* Free associated memory */
@@ -279,11 +278,11 @@
 /**
  * Take new object from slab or create new if needed
  *
  * @return Object address or null
  */
-static void * slab_obj_create(slab_cache_t *cache, int flags)
+static void *slab_obj_create(slab_cache_t *cache, int flags)
 {
     slab_t *slab;
     void *obj;
 
     spinlock_lock(&cache->slablock);
@@ -299,11 +298,12 @@
         slab = slab_space_alloc(cache, flags);
         if (!slab)
             return NULL;
         spinlock_lock(&cache->slablock);
     } else {
-        slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
+        slab = list_get_instance(cache->partial_slabs.next, slab_t,
+            link);
         list_remove(&slab->link);
     }
     obj = slab->start + slab->nextavail * cache->size;
     slab->nextavail = *((int *)obj);
     slab->available--;
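
The three hunks above touch the heart of the slab back-end: a slab's free
list is threaded through the free objects themselves. slab_space_alloc()
stores, in the first word of object i, the index i + 1 of its successor;
slab_obj_create() pops the head index from slab->nextavail; and
slab_obj_destroy() pushes a freed object back with the inverse arithmetic.
A minimal user-space sketch of the same technique (hypothetical object
size, no locking; illustration only, not HelenOS code):

    #include <stdio.h>
    #include <stdlib.h>

    #define OBJECTS  4
    #define OBJ_SIZE 64   /* hypothetical object size */

    int main(void)
    {
        char *start = malloc(OBJECTS * OBJ_SIZE);
        int nextavail = 0;            /* head of the embedded index list */
        unsigned int i;

        if (!start)
            return 1;

        /* Build the list as slab_space_alloc() does: object i stores the
         * index of its successor; the last one stores the out-of-range
         * index OBJECTS, never followed because 'available' reaches zero
         * first. */
        for (i = 0; i < OBJECTS; i++)
            *((int *) (start + i * OBJ_SIZE)) = i + 1;

        /* Pop, as in slab_obj_create(). */
        char *obj = start + nextavail * OBJ_SIZE;
        nextavail = *((int *) obj);
        printf("allocated index 0, new head %d\n", nextavail);

        /* Push, as in slab_obj_destroy(). */
        *((int *) obj) = nextavail;
        nextavail = (obj - start) / OBJ_SIZE;
        printf("freed, head back to %d\n", nextavail);

        free(start);
        return 0;
    }
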
@@ -330,12 +330,11 @@
  * Finds a full magazine in cache, takes it from list
  * and returns it
  *
  * @param first If true, return first, else last mag
  */
-static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
-                        int first)
+static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
 {
     slab_magazine_t *mag = NULL;
     link_t *cur;
 
     spinlock_lock(&cache->maglock);
@@ -366,12 +365,11 @@
 /**
  * Free all objects in magazine and free memory associated with magazine
  *
  * @return Number of freed pages
  */
-static count_t magazine_destroy(slab_cache_t *cache,
-                slab_magazine_t *mag)
+static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
 {
     unsigned int i;
     count_t frames = 0;
 
     for (i = 0; i < mag->busy; i++) {
@@ -387,11 +385,11 @@
 /**
  * Find full magazine, set it as current and return it
  *
  * Assume cpu_magazine lock is held
  */
-static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
+static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
 {
     slab_magazine_t *cmag, *lastmag, *newmag;
 
     cmag = cache->mag_cache[CPU->id].current;
     lastmag = cache->mag_cache[CPU->id].last;
@@ -421,11 +419,11 @@
 /**
  * Try to find object in CPU-cache magazines
  *
  * @return Pointer to object or NULL if not available
  */
-static void * magazine_obj_get(slab_cache_t *cache)
+static void *magazine_obj_get(slab_cache_t *cache)
 {
     slab_magazine_t *mag;
     void *obj;
 
     if (!CPU)
@@ -456,11 +454,11 @@
  *  If full, try the last.
  *   If full, put to magazines list.
  *   allocate new, exchange last & current
  *
  */
-static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
+static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
 {
     slab_magazine_t *cmag,*lastmag,*newmag;
 
     cmag = cache->mag_cache[CPU->id].current;
     lastmag = cache->mag_cache[CPU->id].last;
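
The two functions above are the per-CPU half of the magazine layer: each
CPU keeps a current and a last magazine and, per the comment block on
make_empty_current_mag(), exchanges the two before falling back to the
shared cache->magazines list or to allocating a fresh magazine. A
standalone model of that exchange on the free path (the struct layout and
capacity are simplified assumptions, locking omitted):

    #define MAG_SIZE 8                 /* assumed magazine capacity */

    typedef struct {
        int busy;                      /* objects currently cached */
        void *objs[MAG_SIZE];
    } mag_t;

    typedef struct {
        mag_t *current;                /* filled and emptied first */
        mag_t *last;                   /* second chance before the depot */
    } cpu_mags_t;

    /* If 'current' is full but 'last' still has room, exchange them and
     * keep freeing into per-CPU storage. Returns the magazine that can
     * take the object, or NULL when the caller must go to the shared
     * depot or allocate a new magazine. */
    static mag_t *make_room(cpu_mags_t *cm)
    {
        if (cm->current && cm->current->busy < MAG_SIZE)
            return cm->current;
        if (cm->last && cm->last->busy < MAG_SIZE) {
            mag_t *tmp = cm->current;  /* exchange last & current */
            cm->current = cm->last;
            cm->last = tmp;
            return cm->current;
        }
        return NULL;
    }
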
@@ -528,11 +526,12 @@
 
 /** Return number of objects that fit in certain cache size */
 static unsigned int comp_objects(slab_cache_t *cache)
 {
     if (cache->flags & SLAB_CACHE_SLINSIDE)
-        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
+        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
+            cache->size;
     else
         return (PAGE_SIZE << cache->order) / cache->size;
 }
 
 /** Return wasted space in slab */
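
A worked example for comp_objects(), assuming a hypothetical 4096-byte
PAGE_SIZE and a 64-byte slab_t: a cache of order 0 with 256-byte objects
holds (4096 - 64) / 256 = 15 objects per slab with SLAB_CACHE_SLINSIDE,
versus 4096 / 256 = 16 when the slab_t header lives outside the slab, at
the cost of an extra allocation from slab_extern_cache.
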
@@ -555,26 +554,24 @@
 {
     unsigned int i;
 
     ASSERT(_slab_initialized >= 2);
 
-    cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,0);
+    cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
+        0);
     for (i = 0; i < config.cpu_count; i++) {
         memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
-        spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
+        spinlock_initialize(&cache->mag_cache[i].lock,
+            "slab_maglock_cpu");
     }
 }
 
 /** Initialize allocated memory as a slab cache */
 static void
-_slab_cache_create(slab_cache_t *cache,
-           char *name,
-           size_t size,
-           size_t align,
-           int (*constructor)(void *obj, int kmflag),
-           int (*destructor)(void *obj),
-           int flags)
+_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
+    int flags)
 {
     int pages;
     ipl_t ipl;
 
     memsetb(cache, sizeof(*cache), 0);
@@ -593,11 +590,11 @@
     list_initialize(&cache->full_slabs);
     list_initialize(&cache->partial_slabs);
     list_initialize(&cache->magazines);
     spinlock_initialize(&cache->slablock, "slab_lock");
     spinlock_initialize(&cache->maglock, "slab_maglock");
-    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
+    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         make_magcache(cache);
 
     /* Compute slab sizes, object counts in slabs etc. */
     if (cache->size < SLAB_INSIDE_SIZE)
         cache->flags |= SLAB_CACHE_SLINSIDE;
@@ -606,11 +603,11 @@
     pages = SIZE2FRAMES(cache->size);
     /* We need the 2^order >= pages */
     if (pages == 1)
         cache->order = 0;
     else
-        cache->order = fnzb(pages-1)+1;
+        cache->order = fnzb(pages - 1) + 1;
 
     while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
         cache->order += 1;
     }
     cache->objects = comp_objects(cache);
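
A worked example for the order computation above: fnzb() yields the index
of the most significant set bit, so fnzb(pages - 1) + 1 is the smallest
order with 2^order >= pages. Assuming 4096-byte frames, a 10000-byte
object gives pages = SIZE2FRAMES(10000) = 3 and fnzb(2) + 1 = 2, i.e. a
four-frame (16 KiB) slab; the badness() loop then grows the order further
while the wasted tail space still exceeds SLAB_MAX_BADNESS(cache).
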
@@ -627,22 +624,20 @@
     spinlock_unlock(&slab_cache_lock);
     interrupts_restore(ipl);
 }
 
 /** Create slab cache  */
-slab_cache_t * slab_cache_create(char *name,
-                 size_t size,
-                 size_t align,
-                 int (*constructor)(void *obj, int kmflag),
-                 int (*destructor)(void *obj),
-                 int flags)
+slab_cache_t *
+slab_cache_create(char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
+    int flags)
 {
     slab_cache_t *cache;
 
     cache = slab_alloc(&slab_cache_cache, 0);
     _slab_cache_create(cache, name, size, align, constructor, destructor,
-               flags);
+        flags);
     return cache;
 }
 
 /**
  * Reclaim space occupied by objects that are already free
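
For context, typical use of slab_cache_create(); the cache name and
payload type below are made up for illustration:

    typedef struct {
        int payload;                   /* illustrative only */
    } foo_t;

    static slab_cache_t *foo_cache;

    void foo_subsystem_init(void)
    {
        /* Default alignment, no constructor/destructor, no flags. */
        foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
            NULL, NULL, 0);
    }

    void foo_do_work(void)
    {
        /* With flags == 0 the call may block but always returns memory,
         * per the comment on slab_alloc() further down. */
        foo_t *f = slab_alloc(foo_cache, 0);
        f->payload = 42;
        slab_free(foo_cache, f);
    }
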
@@ -662,11 +657,11 @@
 
     /* We count up to original magazine count to avoid
      * endless loop
      */
     magcount = atomic_get(&cache->magazine_counter);
-    while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
+    while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
         frames += magazine_destroy(cache,mag);
         if (!(flags & SLAB_RECLAIM_ALL) && frames)
             break;
     }
 
@@ -715,22 +710,21 @@
 
     /* Destroy all magazines */
     _slab_reclaim(cache, SLAB_RECLAIM_ALL);
 
     /* All slabs must be empty */
-    if (!list_empty(&cache->full_slabs) \
-        || !list_empty(&cache->partial_slabs))
+    if (!list_empty(&cache->full_slabs) ||
+        !list_empty(&cache->partial_slabs))
         panic("Destroying cache that is not empty.");
 
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         free(cache->mag_cache);
     slab_free(&slab_cache_cache, cache);
 }
 
-/** Allocate new object from cache - if no flags given, always returns
-    memory */
-void * slab_alloc(slab_cache_t *cache, int flags)
+/** Allocate new object from cache - if no flags given, always returns memory */
+void *slab_alloc(slab_cache_t *cache, int flags)
 {
     ipl_t ipl;
     void *result = NULL;
 
     /* Disable interrupts to avoid deadlocks with interrupt handlers */
@@ -755,13 +749,12 @@
 {
     ipl_t ipl;
 
     ipl = interrupts_disable();
 
-    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
-        || magazine_obj_put(cache, obj)) {
-
+    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
+        magazine_obj_put(cache, obj)) {
         slab_obj_destroy(cache, obj, slab);
 
     }
     interrupts_restore(ipl);
     atomic_dec(&cache->allocated_objs);
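
slab_free() above shows both the locking discipline and the layering:
interrupts are disabled for the whole operation, the object goes to the
per-CPU magazine when possible (magazine_obj_put()), and it reaches the
slab back-end (slab_obj_destroy()) only when the cache has no magazine
layer or that layer cannot take the object. The interrupt bracket,
restated:

    ipl_t ipl;

    ipl = interrupts_disable();   /* mask interrupts, remember old level */
    /* ... magazine fast path, then slab slow path ... */
    interrupts_restore(ipl);      /* restore the saved level */
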
@@ -784,11 +777,12 @@
 
     /* TODO: Add assert, that interrupts are disabled, otherwise
      * memory allocation from interrupts can deadlock.
      */
 
-    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
+    for (cur = slab_cache_list.next; cur != &slab_cache_list;
+        cur = cur->next) {
         cache = list_get_instance(cur, slab_cache_t, link);
         frames += _slab_reclaim(cache, flags);
     }
 
     spinlock_unlock(&slab_cache_lock);
@@ -804,56 +798,53 @@
     link_t *cur;
     ipl_t ipl;
 
     ipl = interrupts_disable();
     spinlock_lock(&slab_cache_lock);
-    printf("slab name        size     pages  obj/pg slabs  cached allocated ctl\n");
-    printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");
+    printf("slab name        size     pages  obj/pg slabs  cached allocated"
+        " ctl\n");
+    printf("---------------- -------- ------ ------ ------ ------ ---------"
+        " ---\n");
 
-    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
+    for (cur = slab_cache_list.next; cur != &slab_cache_list;
+        cur = cur->next) {
         cache = list_get_instance(cur, slab_cache_t, link);
 
         printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
-            cache->name, cache->size, (1 << cache->order), cache->objects,
-            atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs),
-            atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
+            cache->name, cache->size, (1 << cache->order),
+            cache->objects, atomic_get(&cache->allocated_slabs),
+            atomic_get(&cache->cached_objs),
+            atomic_get(&cache->allocated_objs),
+            cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
     }
     spinlock_unlock(&slab_cache_lock);
     interrupts_restore(ipl);
 }
 
 void slab_cache_init(void)
 {
     int i, size;
 
     /* Initialize magazine cache */
-    _slab_cache_create(&mag_cache,
-               "slab_magazine",
-               sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
-               sizeof(uintptr_t),
-               NULL, NULL,
-               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+    _slab_cache_create(&mag_cache, "slab_magazine",
+        sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
+        sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
+        SLAB_CACHE_SLINSIDE);
     /* Initialize slab_cache cache */
-    _slab_cache_create(&slab_cache_cache,
-               "slab_cache",
-               sizeof(slab_cache_cache),
-               sizeof(uintptr_t),
-               NULL, NULL,
-               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+    _slab_cache_create(&slab_cache_cache, "slab_cache",
+        sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
+        SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
     /* Initialize external slab cache */
-    slab_extern_cache = slab_cache_create("slab_extern",
-                          sizeof(slab_t),
-                          0, NULL, NULL,
-                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
+    slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
+        NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
     /* Initialize structures for malloc */
-    for (i=0, size=(1 << SLAB_MIN_MALLOC_W);
-         i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
-         i++, size <<= 1) {
-        malloc_caches[i] = slab_cache_create(malloc_names[i],
-                             size, 0,
-                             NULL,NULL, SLAB_CACHE_MAGDEFERRED);
+    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
+        i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
+        i++, size <<= 1) {
+        malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
+            NULL, NULL, SLAB_CACHE_MAGDEFERRED);
     }
 #ifdef CONFIG_DEBUG
     _slab_initialized = 1;
 #endif
 }
@@ -874,24 +865,26 @@
     _slab_initialized = 2;
 #endif
 
     spinlock_lock(&slab_cache_lock);
 
-    for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
+    for (cur = slab_cache_list.next; cur != &slab_cache_list;
+        cur = cur->next){
         s = list_get_instance(cur, slab_cache_t, link);
-        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
+        if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
+            SLAB_CACHE_MAGDEFERRED)
             continue;
         make_magcache(s);
         s->flags &= ~SLAB_CACHE_MAGDEFERRED;
     }
 
     spinlock_unlock(&slab_cache_lock);
 }
 
 /**************************************/
 /* kalloc/kfree functions             */
-void * malloc(unsigned int size, int flags)
+void *malloc(unsigned int size, int flags)
 {
     ASSERT(_slab_initialized);
     ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));
 
     if (size < (1 << SLAB_MIN_MALLOC_W))
@@ -900,11 +893,11 @@
     int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
 
     return slab_alloc(malloc_caches[idx], flags);
 }
 
-void * realloc(void *ptr, unsigned int size, int flags)
+void *realloc(void *ptr, unsigned int size, int flags)
 {
     ASSERT(_slab_initialized);
     ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
 
     void *new_ptr;
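
A closing worked example for the malloc() size-class index above, assuming
hypothetical bounds SLAB_MIN_MALLOC_W = 4 and SLAB_MAX_MALLOC_W = 18: a
100-byte request gives fnzb(100 - 1) = 6, so idx = 6 - 4 + 1 = 3, selecting
malloc_caches[3], i.e. the 2^7 = 128-byte cache, the smallest power-of-two
class that holds 100 bytes.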