/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 *   - empty SLABS are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 *   - empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 *   The following features are not currently supported but would be easy to add:
 *   - cache coloring
 *   - dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, we first check whether it is
 * available in the CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared SLAB - if a partially full slab is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if the allocation
 * fails, the object is deallocated into the SLAB). If the magazine is full,
 * it is put onto the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty SLABS are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The SLAB information structure is kept inside the data area, if possible.
 * A cache can be marked to never use magazines. This is used
 * only for SLAB-related caches to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list, until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * TODO: For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice per-CPU
 * buffer. The other possibility is to use a per-cache
 * 'empty-magazine-list', which would decrease contention on the single
 * per-system magazine cache.
 *
 * - it might be good to add lock granularity even at the slab level;
 *   we could then try_spinlock over all partial slabs and thus improve
 *   scalability even at the slab level
 */
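
/*
 * Usage example (an illustrative sketch only, not part of the allocator):
 * a client typically creates a cache for its object type once and then
 * allocates and frees objects through it. 'my_cache', my_obj_t and
 * my_subsystem_init() below are hypothetical names; slab_cache_create(),
 * slab_alloc() and slab_free() are the interfaces defined in this file.
 *
 *   static slab_cache_t *my_cache;
 *
 *   void my_subsystem_init(void)
 *   {
 *       // no constructor/destructor, default alignment and flags
 *       my_cache = slab_cache_create("my_obj_t", sizeof(my_obj_t), 0,
 *                                    NULL, NULL, 0);
 *   }
 *
 *   my_obj_t *obj = slab_alloc(my_cache, 0);
 *   // ... use obj ...
 *   slab_free(my_cache, obj);
 */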


#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static.
 * - using the SLAB allocator for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] =  {
    "malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
    "malloc-256","malloc-512","malloc-1K","malloc-2K",
    "malloc-4K","malloc-8K","malloc-16K","malloc-32K",
    "malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache; /**< Pointer to parent cache */
    link_t link;         /**< List of full/partial slabs */
    void *start;         /**< Start address of first available item */
    count_t available;   /**< Count of available items in this slab */
    index_t nextavail;   /**< The index of next available item */
} slab_t;

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i=0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

    for (i=0; i<cache->objects;i++)
        *((int *) (slab->start + i*cache->size)) = i+1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}
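
/*
 * A note on the free list built by slab_space_alloc() above (a sketch,
 * assuming a freshly allocated slab with cache->objects == 4): the first
 * int of every free object stores the index of the next free object and
 * slab->nextavail holds the index of the head of the list.
 *
 *   nextavail = 0
 *   object 0: [ 1 | ... ]   -> next free is object 1
 *   object 1: [ 2 | ... ]   -> next free is object 2
 *   object 2: [ 3 | ... ]   -> next free is object 3
 *   object 3: [ 4 | ... ]   -> 4 == cache->objects, end of the list
 *
 * slab_obj_create() pops the head of this list and slab_obj_destroy()
 * pushes the freed object back onto it.
 */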

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */

/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if the caller knows it; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    count_t frames = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);
    ASSERT(slab->available < cache->objects);

    spinlock_lock(&cache->slablock);

    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to the correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        /* This should not produce a deadlock, as
         * a magazine is always allocated with NO reclaim,
         * so we can keep all locks */
        frames = slab_space_free(cache, slab);
    } else if (slab->available == 1) {
        /* It was in the full list, move it to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }

    spinlock_unlock(&cache->slablock);

    return frames;
}

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why we should get recursion at most 1 level deep
         */
        spinlock_unlock(&cache->slablock);
        slab = slab_space_alloc(cache, flags);
        if (!slab)
            return NULL;
        spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                     slab_t,
                     link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
    if (! slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i=0;i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assume the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from the magazine list */
    spinlock_lock(&cache->maglock);
    if (list_empty(&cache->magazines)) {
        spinlock_unlock(&cache->maglock);
        return NULL;
    }
    newmag = list_get_instance(cache->magazines.next,
                   slab_magazine_t,
                   link);
    list_remove(&newmag->link);
    spinlock_unlock(&cache->maglock);

    if (lastmag)
        slab_free(&mag_cache, lastmag);
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    if (!CPU)
        return NULL;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Make sure the current magazine has room for at least one more object;
 * return a pointer to it, or NULL if no such magazine is available
 * and a new one cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to the processor.
 * First try the current.
 *  If full, try the last.
 *   If full, put it onto the magazines list,
 *   allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag,*lastmag,*newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current | last are full | nonexistent, allocate new */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to the magazine list */
    if (lastmag) {
        spinlock_lock(&cache->maglock);
        list_prepend(&lastmag->link, &cache->magazines);
        spinlock_unlock(&cache->maglock);
    }
    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    if (!CPU)
        return -1;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the number of wasted bytes in a slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
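
/*
 * A worked example (assuming 4 KiB pages, cache->order == 0,
 * cache->size == 192 and, purely for illustration, sizeof(slab_t) == 24):
 * with SLAB_CACHE_SLINSIDE set, comp_objects() returns
 * (4096 - 24) / 192 == 21 objects per slab, and badness() returns
 * (4096 - 24) - 21 * 192 == 40 wasted bytes per slab.
 */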
501
 
759 palkovsky 502
/** Initialize allocated memory as a slab cache */
503
static void
504
_slab_cache_create(slab_cache_t *cache,
505
           char *name,
506
           size_t size,
507
           size_t align,
508
           int (*constructor)(void *obj, int kmflag),
509
           void (*destructor)(void *obj),
510
           int flags)
511
{
512
    int i;
771 palkovsky 513
    int pages;
759 palkovsky 514
 
515
    memsetb((__address)cache, sizeof(*cache), 0);
516
    cache->name = name;
517
 
766 palkovsky 518
    if (align < sizeof(__native))
519
        align = sizeof(__native);
520
    size = ALIGN_UP(size, align);
521
 
762 palkovsky 522
    cache->size = size;
759 palkovsky 523
 
524
    cache->constructor = constructor;
525
    cache->destructor = destructor;
526
    cache->flags = flags;
527
 
528
    list_initialize(&cache->full_slabs);
529
    list_initialize(&cache->partial_slabs);
530
    list_initialize(&cache->magazines);
776 palkovsky 531
    spinlock_initialize(&cache->slablock, "slab_lock");
532
    spinlock_initialize(&cache->maglock, "slab_maglock");
768 palkovsky 533
    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
773 palkovsky 534
        for (i=0; i < config.cpu_count; i++) {
772 palkovsky 535
            memsetb((__address)&cache->mag_cache[i],
536
                sizeof(cache->mag_cache[i]), 0);
759 palkovsky 537
            spinlock_initialize(&cache->mag_cache[i].lock,
776 palkovsky 538
                        "slab_maglock_cpu");
772 palkovsky 539
        }
759 palkovsky 540
    }
541
 
542
    /* Compute slab sizes, object counts in slabs etc. */
543
    if (cache->size < SLAB_INSIDE_SIZE)
544
        cache->flags |= SLAB_CACHE_SLINSIDE;
545
 
762 palkovsky 546
    /* Minimum slab order */
771 palkovsky 547
    pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
548
    cache->order = fnzb(pages);
766 palkovsky 549
 
762 palkovsky 550
    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
551
        cache->order += 1;
552
    }
553
    cache->objects = comp_objects(cache);
766 palkovsky 554
    /* If info fits in, put it inside */
555
    if (badness(cache) > sizeof(slab_t))
556
        cache->flags |= SLAB_CACHE_SLINSIDE;
762 palkovsky 557
 
759 palkovsky 558
    spinlock_lock(&slab_cache_lock);
559
 
560
    list_append(&cache->link, &slab_cache_list);
561
 
562
    spinlock_unlock(&slab_cache_lock);
563
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 void (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    link_t *cur;
    count_t frames = 0;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* First lock all cpu caches, then the complete cache lock */
    if (flags & SLAB_RECLAIM_ALL) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_lock(&cache->mag_cache[i].lock);
    }
    spinlock_lock(&cache->maglock);

    if (flags & SLAB_RECLAIM_ALL) {
        /* Aggressive memfree */
        /* Destroy CPU magazines */
        for (i=0; i<config.cpu_count; i++) {
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
        }
    }
    /* We can release the per-CPU locks now */
    if (flags & SLAB_RECLAIM_ALL) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_unlock(&cache->mag_cache[i].lock);
    }
    /* Destroy full magazines */
    cur=cache->magazines.prev;

    while (cur != &cache->magazines) {
        mag = list_get_instance(cur, slab_magazine_t, link);

        cur = cur->prev;
        list_remove(&mag->link);
        frames += magazine_destroy(cache,mag);
        /* If we are not doing a full reclaim, break
         * as soon as something is freed */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    spinlock_unlock(&cache->maglock);

    return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    spinlock_lock(&slab_cache_lock);
    list_remove(&cache->link);
    spinlock_unlock(&slab_cache_lock);

    slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags are given, it always
    returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);

    if (!result)
        result = slab_obj_create(cache, flags);

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}
688
 
771 palkovsky 689
/** Return object to cache, use slab if known  */
690
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
759 palkovsky 691
{
692
    ipl_t ipl;
693
 
694
    ipl = interrupts_disable();
695
 
762 palkovsky 696
    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
697
        || magazine_obj_put(cache, obj)) {
776 palkovsky 698
 
771 palkovsky 699
        slab_obj_destroy(cache, obj, slab);
776 palkovsky 700
 
759 palkovsky 701
    }
769 palkovsky 702
    interrupts_restore(ipl);
764 palkovsky 703
    atomic_dec(&cache->allocated_objs);
759 palkovsky 704
}
705
 
771 palkovsky 706
/** Return slab object to cache */
707
void slab_free(slab_cache_t *cache, void *obj)
708
{
709
    _slab_free(cache,obj,NULL);
710
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    /* TODO: Add an assert that interrupts are disabled, otherwise
     * memory allocation from interrupts can deadlock.
     */

    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print the list of slab caches */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;

    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
}

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
               "slab_cache",
               sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                          sizeof(slab_t),
                          0, NULL, NULL,
                          SLAB_CACHE_SLINSIDE);

    /* Initialize structures for malloc */
    for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                             size, 0,
                             NULL,NULL,0);
    }
#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
#endif
}

/**************************************/
/* kalloc/kfree functions             */
void * kalloc(unsigned int size, int flags)
{
    int idx;

    ASSERT(_slab_initialized);
    ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}
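
/*
 * A worked example of the index computation above (assuming
 * SLAB_MIN_MALLOC_W == 3, i.e. the smallest cache is 'malloc-8', and that
 * fnzb() returns the index of the most significant set bit): for
 * size == 100 we get fnzb(99) == 6 and idx == 6 - 3 + 1 == 4, which selects
 * malloc_caches[4] - the 'malloc-128' cache, the smallest power-of-two
 * cache able to hold 100 bytes.
 */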


void kfree(void *obj)
{
    slab_t *slab = obj2slab(obj);

    _slab_free(slab->cache, obj, slab);
}