/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 *   - empty SLABs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 *   - empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to do:
 *   - cache coloring
 *   - dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, the CPU-bound magazine is checked
 * first. If the object is not found there, it is allocated from a CPU-shared
 * SLAB - if a partially full one is found, it is used, otherwise a new one
 * is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if the allocation
 * fails, the object is deallocated into the SLAB). If the magazine is full,
 * it is put onto the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines, to avoid thrashing
 * when somebody is allocating/deallocating 1 item at the magazine size
 * boundary. LIFO order is enforced, which should avoid fragmentation as much
 * as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty SLABs are freed immediately (thrashing is avoided by the
 * magazines).
 *
 * The SLAB information structure is kept inside the data area, if possible.
 * A cache can be marked that it should not use magazines. This is used
 * only for SLAB-related caches, to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * slab_reclaim() tries a 'light reclaim' first, then a brutal reclaim. The
 * light reclaim releases slabs from the CPU-shared magazine list until at
 * least 1 slab is deallocated in each cache (this algorithm should probably
 * change). The brutal reclaim removes all cached objects, even from the
 * CPU-bound magazines.
 *
 */
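
/*
 * Illustrative usage sketch (an addition for clarity, not part of the
 * original code): a client allocating fixed-size objects through the
 * public interface below. 'foo_t' and 'foo_cache' are hypothetical names;
 * the calls and their signatures are the ones defined in this file.
 *
 *   typedef struct { ... } foo_t;
 *   static slab_cache_t *foo_cache;
 *
 *   foo_cache = slab_cache_create("foo_t", sizeof(foo_t), 0,
 *                                 NULL, NULL, 0);
 *   foo_t *f = slab_alloc(foo_cache, 0); // no flags => always returns memory
 *   ...
 *   slab_free(foo_cache, f);             // usually lands in a CPU magazine
 *   slab_cache_destroy(foo_cache);       // all slabs must be empty by then
 */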

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
97
SPINLOCK_INITIALIZE(slab_cache_lock);
769 palkovsky 98
static LIST_INITIALIZE(slab_cache_list);
759 palkovsky 99
 
769 palkovsky 100
/** Magazine cache */
101
static slab_cache_t mag_cache;
102
/** Cache for cache descriptors */
103
static slab_cache_t slab_cache_cache;
759 palkovsky 104
 
769 palkovsky 105
/** Cache for external slab descriptors
106
 * This time we want per-cpu cache, so do not make it static
107
 * - using SLAB for internal SLAB structures will not deadlock,
108
 *   as all slab structures are 'small' - control structures of
109
 *   their caches do not require further allocation
110
 */
111
static slab_cache_t *slab_extern_cache;
771 palkovsky 112
/** Caches for malloc */
113
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
114
char *malloc_names[] =  {
115
    "malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
116
    "malloc-256","malloc-512","malloc-1K","malloc-2K",
117
    "malloc-4K","malloc-8K","malloc-16K","malloc-32K",
118
    "malloc-64K","malloc-128K"
119
};
762 palkovsky 120
 

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache;   /**< Pointer to parent cache */
    link_t link;           /**< List of full/partial slabs */
    void *start;           /**< Start address of first available item */
    count_t available;     /**< Count of available items in this slab */
    index_t nextavail;     /**< The index of next available item */
} slab_t;

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i=0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;
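
    /* Build the embedded free list: each free object's first word holds
     * the index of the next free object in this slab */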
    for (i=0; i<cache->objects;i++)
        *((int *) (slab->start + i*cache->size)) = i+1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */

/**
 * Return object to slab and call a destructor
 *
 * Assumes cache->lock is held.
 *
 * @param slab The slab the object belongs to, if the caller knows it;
 *             otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    count_t frames = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);
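
    /* Return the object to the slab's embedded free list */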
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to the correct list */
    if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        /* Avoid deadlock */
        spinlock_unlock(&cache->lock);
        frames = slab_space_free(cache, slab);
        spinlock_lock(&cache->lock);
    }

    return frames;
}

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * Assume cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why we should get recursion at most 1 level deep
         */
        spinlock_unlock(&cache->lock);
        slab = slab_space_alloc(cache, flags);
        spinlock_lock(&cache->lock);
        if (!slab) {
            return NULL;
        }
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                     slab_t,
                     link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
    if (! slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);
    return obj;
}

/**************************************/
/* CPU-cache slab functions */

/**
 * Free all objects in a magazine and free memory associated with the magazine
 *
 * Assume mag_cache[cpu].lock is locked
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i=0; i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assume the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from the magazine list */
    spinlock_lock(&cache->lock);
    if (list_empty(&cache->magazines)) {
        spinlock_unlock(&cache->lock);
        return NULL;
    }
    newmag = list_get_instance(cache->magazines.next,
                   slab_magazine_t,
                   link);
    list_remove(&newmag->link);
    spinlock_unlock(&cache->lock);

    if (lastmag)
        slab_free(&mag_cache, lastmag);
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
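    /* Objects are taken from the magazine in LIFO order */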
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Assure that the current magazine has free space; return a pointer to it,
 * or NULL if no magazine with free space is available and a new one cannot
 * be allocated
 *
 * We have 2 magazines bound to each processor.
 * First try the current one.
 *  If it is full, try the last one.
 *   If that is full too, flush the last one to the magazine list,
 *   allocate a new one and exchange last & current.
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* Current & last are full or nonexistent, allocate new */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to the magazine list */
    if (lastmag)
        list_prepend(&lastmag->link, &cache->magazines);
    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}

/**
 * Put an object into the CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}

/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
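
/*
 * Worked example (illustrative, not from the original code; assumes
 * PAGE_SIZE == 4096 and sizeof(slab_t) == 32): an order-0 cache of
 * 96-byte objects with the slab_t kept inside holds
 * (4096 - 32) / 96 = 42 objects, so badness() returns
 * 4064 - 42 * 96 = 32 wasted bytes.
 */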

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
           char *name,
           size_t size,
           size_t align,
           int (*constructor)(void *obj, int kmflag),
           void (*destructor)(void *obj),
           int flags)
{
    int i;
    int pages;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->lock, "cachelock");
    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_initialize(&cache->mag_cache[i].lock,
                        "cpucachelock");
    }

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
    cache->order = fnzb(pages);

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If the info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 void (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    link_t *cur;
    count_t frames = 0;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* First lock all CPU caches, then the complete cache lock */
    if (flags & SLAB_RECLAIM_ALL) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_lock(&cache->mag_cache[i].lock);
    }
    spinlock_lock(&cache->lock);

    if (flags & SLAB_RECLAIM_ALL) {
        /* Aggressive memfree */
        /* Destroy CPU magazines */
        for (i=0; i < config.cpu_count; i++) {
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
        }
    }
    /* Destroy full magazines */
    cur = cache->magazines.prev;

    while (cur != &cache->magazines) {
        mag = list_get_instance(cur, slab_magazine_t, link);

        cur = cur->prev;
        list_remove(&mag->link);
        frames += magazine_destroy(cache, mag);
        /* If we do not do full reclaim, break
         * as soon as something is freed */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    spinlock_unlock(&cache->lock);
    if (flags & SLAB_RECLAIM_ALL) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_unlock(&cache->mag_cache[i].lock);
    }

    return frames;
}

/** Check that there are no slabs and remove the cache from the system */
void slab_cache_destroy(slab_cache_t *cache)
{
    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    spinlock_lock(&slab_cache_lock);
    list_remove(&cache->link);
    spinlock_unlock(&slab_cache_lock);

    slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from the cache - if no flags are given,
    it always returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE) && CPU)
        result = magazine_obj_get(cache);

    if (!result) {
        spinlock_lock(&cache->lock);
        result = slab_obj_create(cache, flags);
        spinlock_unlock(&cache->lock);
    }

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return an object to the cache, using the slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
        || !CPU
        || magazine_obj_put(cache, obj)) {

        spinlock_lock(&cache->lock);
        slab_obj_destroy(cache, obj, slab);
        spinlock_unlock(&cache->lock);
    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/** Return a slab object to the cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}

/* Print the list of slab caches */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;

    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
               "slab_cache",
               sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                          sizeof(slab_t),
                          0, NULL, NULL,
                          SLAB_CACHE_SLINSIDE);

    /* Initialize structures for malloc */
    for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                             size, 0,
                             NULL, NULL, 0);
    }
}

/**************************************/
/* kalloc/kfree functions             */

void * kalloc(unsigned int size, int flags)
{
    int idx;

    ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}
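
/*
 * Worked example (illustrative, not from the original code; assumes
 * SLAB_MIN_MALLOC_W == 3, matching the smallest "malloc-8" cache, and
 * that fnzb() returns the index of the most significant set bit):
 * kalloc(100, 0) computes idx = fnzb(99) - 3 + 1 = 6 - 3 + 1 = 4,
 * which selects the "malloc-128" cache - the smallest power of two
 * that can hold 100 bytes.
 */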

void kfree(void *obj)
{
    slab_t *slab = obj2slab(obj);

    _slab_free(slab->cache, obj, slab);
}