/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 *   - empty SLABs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 *   - empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 *   - cache coloring
 *   - dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first checked whether it is
 * available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if the allocation
 * fails, the object is deallocated directly into the slab). If the magazine
 * is full, it is put onto the CPU-shared list of magazines and a new one is
 * allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating one item right at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked as not using magazines. This is used only for
 * slab-related caches in order to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all of its control
 * structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from the CPU-bound
 * magazines.
 *
 * TODO: For better CPU scaling the magazine allocation strategy should be
 * extended. Currently, if a cache does not have a magazine, it asks the
 * non-CPU-cached magazine cache to provide one. It might be feasible to add
 * a CPU-cached magazine cache (which would allocate its magazines from the
 * non-CPU-cached magazine cache). This would provide a nice per-CPU buffer.
 * The other possibility is to use the per-cache 'empty magazine list', which
 * decreases contention on the single per-system magazine cache.
 *
 * - it might be good to add lock granularity even at the slab level; we
 *   could then try_spinlock over all partial slabs and thus improve
 *   scalability even at the slab level
 */
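
/*
 * A minimal usage sketch of the public interface described above. It is kept
 * under #if 0 and is not part of the kernel; the cache name, the object type
 * and the sizes are made up for illustration only - the called functions
 * (slab_cache_create, slab_alloc, slab_free, slab_cache_destroy) are the ones
 * defined later in this file.
 */
#if 0
typedef struct {
    int payload[16];
} example_obj_t;

static slab_cache_t *example_cache;

static void example(void)
{
    example_obj_t *obj;

    /* Create a cache of example_obj_t objects with default (native word)
     * alignment, no constructor/destructor and magazines enabled. */
    example_cache = slab_cache_create("example_cache",
                      sizeof(example_obj_t), 0,
                      NULL, NULL, 0);

    /* Allocate one object; with no special flags this always returns
     * memory (it may wait for the frame allocator). */
    obj = slab_alloc(example_cache, 0);

    /* ... use the object ... */

    /* Return the object; it typically ends up in a CPU-bound magazine. */
    slab_free(example_cache, obj);

    /* Destroy the cache once all objects have been returned. */
    slab_cache_destroy(example_cache);
}
#endif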


#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for the per-CPU magazine bookkeeping (the mag_cache member of slab_cache_t) */
static slab_cache_t *cpu_cache = NULL;
/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] =  {
    "malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
    "malloc-256","malloc-512","malloc-1K","malloc-2K",
    "malloc-4K","malloc-8K","malloc-16K","malloc-32K",
    "malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache; /**< Pointer to parent cache */
    link_t link;         /**< Link in the list of full/partial slabs */
    void *start;         /**< Start address of first available item */
    count_t available;   /**< Count of available items in this slab */
    index_t nextavail;   /**< The index of next available item */
} slab_t;

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize the new slab
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i=0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

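    /* Build the embedded free list: the first word of each free object
     * holds the index of the next free object, so initially object i
     * points to object i+1 (the final index points past the end, but it
     * is never followed because 'available' reaches zero first). */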
    for (i=0; i<cache->objects;i++)
        *((int *) (slab->start + i*cache->size)) = i+1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return an object to its slab and call the destructor
 *
 * @param slab The slab of the object if the caller knows it directly, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    int freed = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    if (cache->destructor)
        freed = cache->destructor(obj);

    spinlock_lock(&cache->slablock);
    ASSERT(slab->available < cache->objects);

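    /* Push the object back onto the slab's embedded free list: store the
     * current head index in the object's first word and make this object
     * the new head. */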
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        spinlock_unlock(&cache->slablock);

        return freed + slab_space_free(cache, slab);

    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    spinlock_unlock(&cache->slablock);
    return freed;
}

/**
 * Take a new object from a slab or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why we should get recursion at most 1-level deep
         */
        spinlock_unlock(&cache->slablock);
        slab = slab_space_alloc(cache, flags);
        if (!slab)
            return NULL;
        spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                     slab_t,
                     link);
        list_remove(&slab->link);
    }
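    /* Pop the next free object off the slab's embedded free list and
     * advance the head to the index stored in that object's first word. */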
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;

    if (! slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);

    if (cache->constructor && cache->constructor(obj, flags)) {
        /* Bad, bad, construction failed */
        slab_obj_destroy(cache, obj, slab);
        return NULL;
    }
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Find a full magazine in the cache, take it from the list
 * and return it
 *
 * @param first If true, return the first magazine, otherwise the last one
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
                        int first)
{
    slab_magazine_t *mag = NULL;
    link_t *cur;

    spinlock_lock(&cache->maglock);
    if (!list_empty(&cache->magazines)) {
        if (first)
            cur = cache->magazines.next;
        else
            cur = cache->magazines.prev;
        mag = list_get_instance(cur, slab_magazine_t, link);
        list_remove(&mag->link);
        atomic_dec(&cache->magazine_counter);
    }
    spinlock_unlock(&cache->maglock);
    return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
    spinlock_lock(&cache->maglock);

    list_prepend(&mag->link, &cache->magazines);
    atomic_inc(&cache->magazine_counter);

    spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i=0;i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from magazine list */
    newmag = get_mag_from_cache(cache, 1);
    if (!newmag)
        return NULL;

    if (lastmag)
        magazine_destroy(cache, lastmag);

    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    if (!CPU)
        return NULL;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Make sure the current magazine has room for another object; return a
 * pointer to it, or NULL if no such magazine is available and a new one
 * cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have two magazines bound to each processor.
 * First try the current one.
 *  If it is full, try the last one.
 *   If that is full too, put it on the magazines list,
 *   allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag,*lastmag,*newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current | last are full | nonexistent, allocate new */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to magazine list */
    if (lastmag)
        put_mag_to_cache(cache, lastmag);

    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    if (!CPU)
        return -1;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
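
/*
 * Worked example (the numbers are illustrative and assume a 4096-byte
 * PAGE_SIZE): for an order-0 cache with an aligned object size of 104 bytes
 * and without SLAB_CACHE_SLINSIDE, comp_objects() gives 4096 / 104 = 39
 * objects per slab and badness() gives 4096 - 39 * 104 = 40 wasted bytes.
 * With SLAB_CACHE_SLINSIDE, sizeof(slab_t) is subtracted from the usable
 * space first, which may lower the object count and changes the wasted-space
 * figure.
 */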

/**
 * Initialize mag_cache structure in slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
    int i;

    ASSERT(cpu_cache);
    cache->mag_cache = slab_alloc(cpu_cache, 0);
    for (i=0; i < config.cpu_count; i++) {
        memsetb((__address)&cache->mag_cache[i],
            sizeof(cache->mag_cache[i]), 0);
        spinlock_initialize(&cache->mag_cache[i].lock,
                    "slab_maglock_cpu");
    }
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
           char *name,
           size_t size,
           size_t align,
           int (*constructor)(void *obj, int kmflag),
           int (*destructor)(void *obj),
           int flags)
{
    int pages;
    ipl_t ipl;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->slablock, "slab_lock");
    spinlock_initialize(&cache->maglock, "slab_maglock");
    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
        make_magcache(cache);

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
    cache->order = fnzb(pages);

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Add cache to cache list */
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 int (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    count_t frames = 0;
    int magcount;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* We count up to the original magazine count to avoid
     * an endless loop
     */
    magcount = atomic_get(&cache->magazine_counter);
    while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
        frames += magazine_destroy(cache,mag);
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    if (flags & SLAB_RECLAIM_ALL) {
        /* Destroy CPU-bound magazines */
        for (i=0; i<config.cpu_count; i++) {
            spinlock_lock(&cache->mag_cache[i].lock);

            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;

            spinlock_unlock(&cache->mag_cache[i].lock);
        }
    }

    return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    ipl_t ipl;

    /* First remove the cache from the cache list, so that we don't need
     * to disable interrupts later
     */

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_remove(&cache->link);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);

    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        slab_free(cpu_cache, cache->mag_cache);
    slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);
    if (!result)
        result = slab_obj_create(cache, flags);

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
        || magazine_obj_put(cache, obj)) {

        slab_obj_destroy(cache, obj, slab);

    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache,obj,NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    /* TODO: Add assert, that interrupts are disabled, otherwise
     * memory allocation from interrupts can deadlock.
     */

    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print the list of slab caches */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
               "slab_cache",
               sizeof(slab_cache_cache),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                          sizeof(slab_t),
                          0, NULL, NULL,
                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

    /* Initialize structures for malloc */
    for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                             size, 0,
                             NULL,NULL, SLAB_CACHE_MAGDEFERRED);
    }
#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors.
 * Allocate the slab for the CPU cache and enable it on all existing
 * caches that are marked SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
    link_t *cur;
    slab_cache_t *s;

    cpu_cache = slab_cache_create("magcpucache",
                      sizeof(slab_mag_cache_t) * config.cpu_count,
                      0, NULL, NULL,
                      SLAB_CACHE_NOMAGAZINE);
    spinlock_lock(&slab_cache_lock);

    for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
        s = list_get_instance(cur, slab_cache_t, link);
        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
            continue;
        make_magcache(s);
        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
    }

    spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* kalloc/kfree functions             */
void * kalloc(unsigned int size, int flags)
{
    int idx;

    ASSERT(_slab_initialized);
    ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

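    /* Map the request to an index into malloc_caches: fnzb(size-1) is the
     * index of the highest set bit of size-1, so the size is effectively
     * rounded up to the next power of two. For example, assuming
     * SLAB_MIN_MALLOC_W is 3 (the "malloc-8" cache), a 100-byte request
     * gives fnzb(99) = 6, idx = 6 - 3 + 1 = 4, i.e. the "malloc-128" cache. */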
    idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}


void kfree(void *obj)
{
    slab_t *slab;

    if (!obj) return;

    slab = obj2slab(obj);
    _slab_free(slab->cache, obj, slab);
}