Subversion Repositories HelenOS


Diff between Rev 3022 and Rev 4055
@@ -165 +165 @@
 
 /**
  * Allocate frames for slab space and initialize
  *
  */
-static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
+static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
 {
     void *data;
     slab_t *slab;
     size_t fsize;
     unsigned int i;
-    unsigned int zone = 0;
+    count_t zone = 0;
 
     data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
     if (!data) {
         return NULL;
     }
-    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
+    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
         slab = slab_alloc(slab_extern_cache, flags);
         if (!slab) {
             frame_free(KA2PA(data));
             return NULL;
         }
@@ -198 +198 @@
     slab->available = cache->objects;
     slab->nextavail = 0;
     slab->cache = cache;
 
     for (i = 0; i < cache->objects; i++)
-        *((int *) (slab->start + i*cache->size)) = i+1;
+        *((int *) (slab->start + i*cache->size)) = i + 1;
 
     atomic_inc(&cache->allocated_slabs);
     return slab;
 }
 
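The initialization loop above threads the free list through the free objects themselves: the first word of object i stores the index of the next free object, i + 1, so an empty slab needs no separate bookkeeping structure. A minimal stand-alone sketch of the same index-chain technique (plain C for illustration; OBJECTS, OBJ_SIZE and the function names are invented, not HelenOS identifiers):

    #include <assert.h>

    #define OBJECTS  4
    #define OBJ_SIZE 16

    static _Alignas(int) char slab_start[OBJECTS * OBJ_SIZE]; /* data area */
    static int nextavail = 0;        /* index of the first free object */
    static int available = OBJECTS;

    static void init_freelist(void)
    {
        /* Same trick as slab_space_alloc(): store the next free index
         * in the first word of every still-unused object. */
        for (int i = 0; i < OBJECTS; i++)
            *((int *) (slab_start + i * OBJ_SIZE)) = i + 1;
    }

    static void *obj_alloc(void)
    {
        assert(available > 0);
        void *obj = slab_start + nextavail * OBJ_SIZE;
        nextavail = *((int *) obj);  /* pop: follow the stored index */
        available--;
        return obj;
    }

    static void obj_free(void *obj)
    {
        *((int *) obj) = nextavail;  /* push: old head goes into the object */
        nextavail = ((char *) obj - slab_start) / OBJ_SIZE;
        available++;
    }

obj_alloc() mirrors the pop in slab_obj_create() below (slab->nextavail = *((int *) obj)), and obj_free() mirrors the push in slab_obj_destroy().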
@@ -237 +237 @@
  *
  * @param slab If the caller knows directly slab of the object, otherwise NULL
  *
  * @return Number of freed pages
  */
-static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
-                slab_t *slab)
+static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
 {
     int freed = 0;
 
     if (!slab)
         slab = obj2slab(obj);
@@ -254 +253 @@
 
     spinlock_lock(&cache->slablock);
     ASSERT(slab->available < cache->objects);
 
     *((int *)obj) = slab->nextavail;
-    slab->nextavail = (obj - slab->start)/cache->size;
+    slab->nextavail = (obj - slab->start) / cache->size;
     slab->available++;
 
     /* Move it to correct list */
     if (slab->available == cache->objects) {
         /* Free associated memory */
279
/**
278
/**
280
 * Take new object from slab or create new if needed
279
 * Take new object from slab or create new if needed
281
 *
280
 *
282
 * @return Object address or null
281
 * @return Object address or null
283
 */
282
 */
284
static void * slab_obj_create(slab_cache_t *cache, int flags)
283
static void *slab_obj_create(slab_cache_t *cache, int flags)
285
{
284
{
286
    slab_t *slab;
285
    slab_t *slab;
287
    void *obj;
286
    void *obj;
288
 
287
 
289
    spinlock_lock(&cache->slablock);
288
    spinlock_lock(&cache->slablock);
@@ -299 +298 @@
         slab = slab_space_alloc(cache, flags);
         if (!slab)
             return NULL;
         spinlock_lock(&cache->slablock);
     } else {
-        slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
+        slab = list_get_instance(cache->partial_slabs.next, slab_t,
+            link);
         list_remove(&slab->link);
     }
     obj = slab->start + slab->nextavail * cache->size;
     slab->nextavail = *((int *)obj);
     slab->available--;
@@ -330 +330 @@
  * Finds a full magazine in cache, takes it from list
  * and returns it
  *
  * @param first If true, return first, else last mag
  */
-static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
-                        int first)
+static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
 {
     slab_magazine_t *mag = NULL;
     link_t *cur;
 
     spinlock_lock(&cache->maglock);
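A magazine here is the Bonwick-style per-CPU stack of pointers to ready-made objects. Its definition is outside this diff; the sketch below is inferred from the uses visible in this file (mag->busy below, and the sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*) item size in slab_cache_init() further down), so treat the field list as an assumption:

    typedef struct {
        link_t link;    /* membership in cache->magazines (assumed) */
        count_t busy;   /* how many object pointers are stored */
        count_t size;   /* capacity, SLAB_MAG_SIZE (assumed) */
        void *objs[];   /* flexible array member; this is why the cache
                           item size adds SLAB_MAG_SIZE * sizeof(void *) */
    } slab_magazine_t;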
@@ -366 +365 @@
 /**
  * Free all objects in magazine and free memory associated with magazine
  *
  * @return Number of freed pages
  */
-static count_t magazine_destroy(slab_cache_t *cache,
-                slab_magazine_t *mag)
+static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag)
 {
     unsigned int i;
     count_t frames = 0;
 
     for (i = 0; i < mag->busy; i++) {
@@ -387 +385 @@
 /**
  * Find full magazine, set it as current and return it
  *
  * Assume cpu_magazine lock is held
  */
-static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
+static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
 {
     slab_magazine_t *cmag, *lastmag, *newmag;
 
     cmag = cache->mag_cache[CPU->id].current;
     lastmag = cache->mag_cache[CPU->id].last;
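Each CPU keeps two magazines, current and last. When current runs dry, the function swaps it with last rather than going straight to the shared depot, which keeps alternating alloc/free bursts on the CPU-local fast path. A paraphrase of that swap (the rest of the body lies outside this hunk, so this is a sketch, not the verbatim code):

    if (cmag) {
        if (cmag->busy)
            return cmag;                /* current still has objects */
        if (lastmag && lastmag->busy) {
            /* Swap current <-> last; the non-empty one becomes current. */
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Otherwise fall back to a full magazine from cache->magazines. */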
@@ -421 +419 @@
 /**
  * Try to find object in CPU-cache magazines
  *
  * @return Pointer to object or NULL if not available
  */
-static void * magazine_obj_get(slab_cache_t *cache)
+static void *magazine_obj_get(slab_cache_t *cache)
 {
     slab_magazine_t *mag;
     void *obj;
 
     if (!CPU)
@@ -456 +454 @@
  *  If full, try the last.
  *   If full, put to magazines list.
  *   allocate new, exchange last & current
  *
  */
-static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
+static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
 {
     slab_magazine_t *cmag,*lastmag,*newmag;
 
     cmag = cache->mag_cache[CPU->id].current;
     lastmag = cache->mag_cache[CPU->id].last;
@@ -528 +526 @@
 
 /** Return number of objects that fit in certain cache size */
 static unsigned int comp_objects(slab_cache_t *cache)
 {
     if (cache->flags & SLAB_CACHE_SLINSIDE)
-        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
+        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
+            cache->size;
     else
         return (PAGE_SIZE << cache->order) / cache->size;
 }
 
 /** Return wasted space in slab */
@@ -555 +554 @@
 {
     unsigned int i;
 
     ASSERT(_slab_initialized >= 2);
 
-    cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
+    cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
+        0);
     for (i = 0; i < config.cpu_count; i++) {
-        memsetb((uintptr_t)&cache->mag_cache[i],
-            sizeof(cache->mag_cache[i]), 0);
-        spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
+        memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
+        spinlock_initialize(&cache->mag_cache[i].lock,
+            "slab_maglock_cpu");
     }
 }
 
 /** Initialize allocated memory as a slab cache */
 static void
-_slab_cache_create(slab_cache_t *cache,
-           char *name,
-           size_t size,
-           size_t align,
-           int (*constructor)(void *obj, int kmflag),
-           int (*destructor)(void *obj),
-           int flags)
+_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
+    int flags)
 {
     int pages;
     ipl_t ipl;
 
-    memsetb((uintptr_t)cache, sizeof(*cache), 0);
+    memsetb(cache, sizeof(*cache), 0);
     cache->name = name;
 
     if (align < sizeof(unative_t))
         align = sizeof(unative_t);
     size = ALIGN_UP(size, align);
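Rounding size up to align (with a floor of sizeof(unative_t)) guarantees every object starts word-aligned and is at least one word long, which the free-list index trick above relies on. For instance:

    /* On an illustrative 64-bit port: sizeof(unative_t) == 8, so a   */
    /* request of size 50 with align 0 becomes ALIGN_UP(50, 8) == 56. */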
@@ -594 +590 @@
     list_initialize(&cache->full_slabs);
     list_initialize(&cache->partial_slabs);
     list_initialize(&cache->magazines);
     spinlock_initialize(&cache->slablock, "slab_lock");
     spinlock_initialize(&cache->maglock, "slab_maglock");
-    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
+    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         make_magcache(cache);
 
     /* Compute slab sizes, object counts in slabs etc. */
     if (cache->size < SLAB_INSIDE_SIZE)
         cache->flags |= SLAB_CACHE_SLINSIDE;
@@ -607 +603 @@
     pages = SIZE2FRAMES(cache->size);
     /* We need the 2^order >= pages */
     if (pages == 1)
         cache->order = 0;
     else
-        cache->order = fnzb(pages-1)+1;
+        cache->order = fnzb(pages - 1) + 1;
 
     while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
         cache->order += 1;
     }
     cache->objects = comp_objects(cache);
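fnzb() (first non-zero bit) returns the index of the most significant set bit here, so fnzb(pages - 1) + 1 is ceil(log2(pages)): the smallest order whose 2^order frames cover the slab. The badness() loop can then still grow the order if the tail waste is too large. For example:

    /* pages == 3:  fnzb(2) == 1, order == 2, slab spans 1 << 2 == 4 frames */
    /* pages == 4:  fnzb(3) == 1, order == 2, exactly 4 frames             */
    /* pages == 5:  fnzb(4) == 2, order == 3, 8 frames                     */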
@@ -628 +624 @@
     spinlock_unlock(&slab_cache_lock);
     interrupts_restore(ipl);
 }
 
 /** Create slab cache  */
-slab_cache_t * slab_cache_create(char *name,
-                 size_t size,
-                 size_t align,
-                 int (*constructor)(void *obj, int kmflag),
-                 int (*destructor)(void *obj),
-                 int flags)
+slab_cache_t *
+slab_cache_create(char *name, size_t size, size_t align,
+    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
+    int flags)
 {
     slab_cache_t *cache;
 
     cache = slab_alloc(&slab_cache_cache, 0);
     _slab_cache_create(cache, name, size, align, constructor, destructor,
-               flags);
+        flags);
     return cache;
 }
 
 /**
  * Reclaim space occupied by objects that are already free
@@ -663 +657 @@
 
     /* We count up to original magazine count to avoid
      * endless loop
      */
     magcount = atomic_get(&cache->magazine_counter);
-    while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
+    while (magcount-- && (mag=get_mag_from_cache(cache, 0))) {
         frames += magazine_destroy(cache,mag);
         if (!(flags & SLAB_RECLAIM_ALL) && frames)
             break;
     }
 
@@ -716 +710 @@
 
     /* Destroy all magazines */
     _slab_reclaim(cache, SLAB_RECLAIM_ALL);
 
     /* All slabs must be empty */
-    if (!list_empty(&cache->full_slabs) \
-        || !list_empty(&cache->partial_slabs))
+    if (!list_empty(&cache->full_slabs) ||
+        !list_empty(&cache->partial_slabs))
         panic("Destroying cache that is not empty.");
 
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         free(cache->mag_cache);
     slab_free(&slab_cache_cache, cache);
 }
 
-/** Allocate new object from cache - if no flags given, always returns
-    memory */
-void * slab_alloc(slab_cache_t *cache, int flags)
+/** Allocate new object from cache - if no flags given, always returns memory */
+void *slab_alloc(slab_cache_t *cache, int flags)
 {
     ipl_t ipl;
     void *result = NULL;
 
     /* Disable interrupts to avoid deadlocks with interrupt handlers */
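The remainder of slab_alloc() falls outside this hunk, but it follows the usual magazine-first shape: with interrupts off, try the CPU-local magazine and only fall back to the slab lists on a miss. Paraphrased, under the assumption that the body matches the helpers shown above:

    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);       /* per-CPU fast path */
    if (!result)
        result = slab_obj_create(cache, flags); /* slab-list slow path */

    interrupts_restore(ipl);
    if (result)
        atomic_inc(&cache->allocated_objs);
    return result;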
@@ -756 +749 @@
 {
     ipl_t ipl;
 
     ipl = interrupts_disable();
 
-    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
-        || magazine_obj_put(cache, obj)) {
-
+    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
+        magazine_obj_put(cache, obj)) {
         slab_obj_destroy(cache, obj, slab);
 
     }
     interrupts_restore(ipl);
     atomic_dec(&cache->allocated_objs);
785
 
777
 
786
    /* TODO: Add assert, that interrupts are disabled, otherwise
778
    /* TODO: Add assert, that interrupts are disabled, otherwise
787
     * memory allocation from interrupts can deadlock.
779
     * memory allocation from interrupts can deadlock.
788
     */
780
     */
789
 
781
 
790
    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
782
    for (cur = slab_cache_list.next; cur != &slab_cache_list;
-
 
783
        cur = cur->next) {
791
        cache = list_get_instance(cur, slab_cache_t, link);
784
        cache = list_get_instance(cur, slab_cache_t, link);
792
        frames += _slab_reclaim(cache, flags);
785
        frames += _slab_reclaim(cache, flags);
793
    }
786
    }
794
 
787
 
795
    spinlock_unlock(&slab_cache_lock);
788
    spinlock_unlock(&slab_cache_lock);
@@ -799 +792 @@
 
 
 /* Print list of slabs */
 void slab_print_list(void)
 {
-    slab_cache_t *cache;
-    link_t *cur;
-    ipl_t ipl;
-
-    ipl = interrupts_disable();
-    spinlock_lock(&slab_cache_lock);
-
-    printf("slab name        size     pages  obj/pg slabs  cached allocated ctl\n");
-    printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");
-
-    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
-        cache = list_get_instance(cur, slab_cache_t, link);
-
-        printf("%-16s %8zd %6zd %6zd %6zd %6zd %9zd %-3s\n", cache->name, cache->size, (1 << cache->order), cache->objects, atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs), atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
+    int skip = 0;
+
+    printf("slab name        size     pages  obj/pg slabs  cached allocated"
+        " ctl\n");
+    printf("---------------- -------- ------ ------ ------ ------ ---------"
+        " ---\n");
+
+    while (true) {
+        slab_cache_t *cache;
+        link_t *cur;
+        ipl_t ipl;
+        int i;
+
+        /*
+         * We must not hold the slab_cache_lock spinlock when printing
+         * the statistics. Otherwise we can easily deadlock if the print
+         * needs to allocate memory.
+         *
+         * Therefore, we walk through the slab cache list, skipping some
+         * amount of already processed caches during each iteration and
+         * gathering statistics about the first unprocessed cache. For
+         * the sake of printing the statistics, we release the
+         * slab_cache_lock and reacquire it afterwards. Then the walk
+         * starts again.
+         *
+         * This limits both the efficiency and also accuracy of the
+         * obtained statistics. The efficiency is decreased because the
+         * time complexity of the algorithm is quadratic instead of
+         * linear. The accuracy is impacted because we drop the lock
+         * after processing one cache. If there is someone else
+         * manipulating the cache list, we might omit an arbitrary
+         * number of caches or process one cache multiple times.
+         * However, we don't bleed for this algorithm for it is only
+         * statistics.
+         */
+
+        ipl = interrupts_disable();
+        spinlock_lock(&slab_cache_lock);
+
+        for (i = 0, cur = slab_cache_list.next;
+            i < skip && cur != &slab_cache_list;
+            i++, cur = cur->next)
+            ;
+
+        if (cur == &slab_cache_list) {
+            spinlock_unlock(&slab_cache_lock);
+            interrupts_restore(ipl);
+            break;
+        }
+
+        skip++;
+
+        cache = list_get_instance(cur, slab_cache_t, link);
+
+        char *name = cache->name;
+        uint8_t order = cache->order;
+        size_t size = cache->size;
+        unsigned int objects = cache->objects;
+        long allocated_slabs = atomic_get(&cache->allocated_slabs);
+        long cached_objs = atomic_get(&cache->cached_objs);
+        long allocated_objs = atomic_get(&cache->allocated_objs);
+        int flags = cache->flags;
+
+        spinlock_unlock(&slab_cache_lock);
+        interrupts_restore(ipl);
+
+        printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
+            name, size, (1 << order), objects, allocated_slabs,
+            cached_objs, allocated_objs,
+            flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
     }
-    spinlock_unlock(&slab_cache_lock);
-    interrupts_restore(ipl);
 }
 
 void slab_cache_init(void)
 {
     int i, size;
 
     /* Initialize magazine cache */
-    _slab_cache_create(&mag_cache,
-               "slab_magazine",
-               sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
-               sizeof(uintptr_t),
-               NULL, NULL,
-               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+    _slab_cache_create(&mag_cache, "slab_magazine",
+        sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
+        sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
+        SLAB_CACHE_SLINSIDE);
     /* Initialize slab_cache cache */
-    _slab_cache_create(&slab_cache_cache,
-               "slab_cache",
-               sizeof(slab_cache_cache),
-               sizeof(uintptr_t),
-               NULL, NULL,
-               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
+    _slab_cache_create(&slab_cache_cache, "slab_cache",
+        sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
+        SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
     /* Initialize external slab cache */
-    slab_extern_cache = slab_cache_create("slab_extern",
-                          sizeof(slab_t),
-                          0, NULL, NULL,
-                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
+    slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
+        NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
     /* Initialize structures for malloc */
-    for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
-         i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
-         i++, size <<= 1) {
-        malloc_caches[i] = slab_cache_create(malloc_names[i],
-                             size, 0,
-                             NULL,NULL, SLAB_CACHE_MAGDEFERRED);
+    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
+        i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
+        i++, size <<= 1) {
+        malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
+            NULL, NULL, SLAB_CACHE_MAGDEFERRED);
     }
 #ifdef CONFIG_DEBUG
     _slab_initialized = 1;
 #endif
 }
@@ -872 +912 @@
     _slab_initialized = 2;
 #endif
 
     spinlock_lock(&slab_cache_lock);
 
-    for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
+    for (cur = slab_cache_list.next; cur != &slab_cache_list;
+        cur = cur->next){
         s = list_get_instance(cur, slab_cache_t, link);
-        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
+        if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
+            SLAB_CACHE_MAGDEFERRED)
             continue;
         make_magcache(s);
         s->flags &= ~SLAB_CACHE_MAGDEFERRED;
     }
 
     spinlock_unlock(&slab_cache_lock);
 }
 
 /**************************************/
 /* kalloc/kfree functions             */
-void * malloc(unsigned int size, int flags)
+void *malloc(unsigned int size, int flags)
 {
     ASSERT(_slab_initialized);
     ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));
 
     if (size < (1 << SLAB_MIN_MALLOC_W))
@@ -898 +940 @@
     int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
 
     return slab_alloc(malloc_caches[idx], flags);
 }
 
-void * realloc(void *ptr, unsigned int size, int flags)
+void *realloc(void *ptr, unsigned int size, int flags)
 {
     ASSERT(_slab_initialized);
     ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));
 
     void *new_ptr;
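malloc() maps a request onto the smallest power-of-two cache via idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1. Assuming, say, SLAB_MIN_MALLOC_W == 4 (the real constant is defined in a header outside this diff):

    /* size == 100: fnzb(99) == 6, idx == 6 - 4 + 1 == 3,                  */
    /*              malloc_caches[3] serves 1 << (4 + 3) == 128-byte objects */
    /* size == 16:  fnzb(15) == 3, idx == 0, the 16-byte cache             */
    /* size <  16:  bumped up to 1 << SLAB_MIN_MALLOC_W first (see above)  */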