/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
LIST_INITIALIZE(slab_cache_list);

slab_cache_t mag_cache;


typedef struct {
    slab_cache_t *cache;   /**< Pointer to parent cache */
    link_t link;           /**< Link in the list of full/partial slabs */
    void *start;           /**< Start address of first available item */
    count_t available;     /**< Count of available items in this slab */
    index_t nextavail;     /**< The index of the next available item */
} slab_t;
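/*
 * Slab layout notes (derived from slab_space_alloc() and slab_obj_create()
 * below): a slab is 2^order contiguous frames. For caches with the
 * SLAB_CACHE_SLINSIDE flag, the slab_t control structure is placed at the
 * very end of the slab itself; otherwise it is allocated separately with
 * malloc(). Free objects form a free list threaded through the objects:
 * the first word of every free object stores the index of the next free
 * object, and slab->nextavail holds the index of the list head.
 */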
 
/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize
 *
 * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = malloc(sizeof(*slab)); // , flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i = 0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data + i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;

    /* Thread the free list through the objects: object i points to i+1 */
    for (i = 0; i < cache->objects; i++)
        *((int *) (slab->start + i*cache->size)) = i + 1;

    atomic_inc(&cache->allocated_slabs);

    return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
        free(slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure
 *
 * Relies on frame->parent having been set to the owning slab
 * by slab_space_alloc().
 */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * Assume the cache->lock is held.
 *
 * @param slab The object's slab, if the caller knows it directly; NULL otherwise
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    count_t frames = 0;

    if (!slab)
        slab = obj2slab(obj);

    /* Push the object back onto the slab's free list */
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start) / cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        /* Avoid deadlock */
        spinlock_unlock(&cache->lock);
        frames = slab_space_free(cache, slab);
        spinlock_lock(&cache->lock);
    }

    return frames;
}

/**
 * Take new object from slab or create new if needed
 *
 * Assume cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why we should get recursion at most 1-level deep
         */
        spinlock_unlock(&cache->lock);
        slab = slab_space_alloc(cache, flags);
        spinlock_lock(&cache->lock);
        if (!slab) {
            return NULL;
        }
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                     slab_t,
                     link);
        list_remove(&slab->link);
    }
    /* Pop the next free object off the slab's free list */
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);
    return obj;
}
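/*
 * Allocation path overview: slab_alloc() first tries the per-CPU magazine
 * layer via magazine_obj_get(); only when that fails (or the cache was
 * created with SLAB_CACHE_NOMAGAZINE) does it take cache->lock and fall
 * back to slab_obj_create() above, which pops an object from a partial
 * slab or grows the cache with slab_space_alloc(). slab_free() mirrors
 * this: magazine_obj_put() first, slab_obj_destroy() as the fallback.
 */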
 
/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * Assume mag_cache[cpu].lock is locked
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i = 0; i < mag->busy; i++)
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);

    slab_free(&mag_cache, mag);

    return frames;
}
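/*
 * Per-CPU magazine scheme: every CPU keeps two magazines per cache,
 * mag_cache[CPU->id].current and .last. Allocations and frees work on
 * the current magazine, the two are exchanged when one runs empty or
 * full, and completely full magazines are parked on the cache-wide
 * cache->magazines list (protected by cache->lock) until they are
 * consumed again or reclaimed.
 */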
 
/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = cache->mag_cache[CPU->id].current;
    if (!mag)
        goto out;

    if (!mag->busy) {
        /* If current is empty && last exists && not empty, exchange */
        if (cache->mag_cache[CPU->id].last
            && cache->mag_cache[CPU->id].last->busy) {
            cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
            cache->mag_cache[CPU->id].last = mag;
            mag = cache->mag_cache[CPU->id].current;
            goto gotit;
        }
        /* If we still have no busy objects, exchange the current
         * magazine for a full one from the cache-wide list */
        spinlock_lock(&cache->lock);
        if (list_empty(&cache->magazines)) {
            spinlock_unlock(&cache->lock);
            goto out;
        }
        /* Free current magazine and take one from list */
        slab_free(&mag_cache, mag);
        mag = list_get_instance(cache->magazines.next,
                    slab_magazine_t,
                    link);
        list_remove(&mag->link);
        /* Make the freshly taken magazine the CPU's current one */
        cache->mag_cache[CPU->id].current = mag;

        spinlock_unlock(&cache->lock);
    }
gotit:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return mag->objs[--mag->busy];
out:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return NULL;
}

/**
 * Put object into CPU-cache magazine
 *
 * We have 2 magazines bound to processor.
 * First try the current.
 *  If full, try the last.
 *   If full, put it to the magazines list, allocate a new one
 *   and exchange last & current.
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = cache->mag_cache[CPU->id].current;
    if (!mag) {
        /* We do not want to sleep just because of caching */
        /* Especially we do not want reclaiming to start, as
         * this would deadlock */
        mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
        if (!mag) /* Allocation failed, give up on caching */
            goto errout;

        cache->mag_cache[CPU->id].current = mag;
        mag->size = SLAB_MAG_SIZE;
        mag->busy = 0;
    } else if (mag->busy == mag->size) {
        /* The current magazine is full; if the last one is full
         * (or missing) as well, park it on the list and allocate a new one */
        mag = cache->mag_cache[CPU->id].last;
        if (!mag || mag->size == mag->busy) {
            if (mag)
                list_prepend(&mag->link, &cache->magazines);

            mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
            if (!mag)
                goto errout;

            mag->size = SLAB_MAG_SIZE;
            mag->busy = 0;
            cache->mag_cache[CPU->id].last = mag;
        }
        /* Exchange the 2 */
        cache->mag_cache[CPU->id].last = cache->mag_cache[CPU->id].current;
        cache->mag_cache[CPU->id].current = mag;
    }
    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return 0;
errout:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return -1;
}


/**************************************/
/* SLAB CACHE functions */

/** Return number of objects that fit in certain cache size */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
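/*
 * Sizing example (assuming 4 KiB pages, i.e. PAGE_SIZE == 4096): for a
 * cache with size == 256 and order == 0 without SLAB_CACHE_SLINSIDE,
 * comp_objects() yields 4096 / 256 = 16 objects and badness() is
 * 4096 - 16*256 = 0. With SLAB_CACHE_SLINSIDE the slab_t at the end of
 * the slab is subtracted first, so one object fewer may fit and the
 * remainder counts as wasted space.
 */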
 
/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
           char *name,
           size_t size,
           size_t align,
           int (*constructor)(void *obj, int kmflag),
           void (*destructor)(void *obj),
           int flags)
{
    int i;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->lock, "cachelock");
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        for (i = 0; i < config.cpu_count; i++)
            spinlock_initialize(&cache->mag_cache[i].lock,
                        "cpucachelock");
    }

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    cache->order = (cache->size-1) >> PAGE_WIDTH;

    /* Grow the slab until the wasted space is acceptable */
    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 void (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0]));
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}
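/*
 * Minimal usage sketch (illustrative only; my_obj_t is a made-up client
 * structure, not something defined in this file):
 *
 *   slab_cache_t *my_cache;
 *
 *   my_cache = slab_cache_create("my_objects", sizeof(my_obj_t),
 *                                sizeof(__address), NULL, NULL, 0);
 *   my_obj_t *obj = slab_alloc(my_cache, 0);
 *   ...
 *   slab_free(my_cache, obj);
 *   slab_cache_destroy(my_cache);
 */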
 
/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 *
 * TODO: Add light reclaim
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    link_t *cur;
    count_t frames = 0;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* First lock all cpu caches, then the complete cache lock */
    for (i = 0; i < config.cpu_count; i++)
        spinlock_lock(&cache->mag_cache[i].lock);
    spinlock_lock(&cache->lock);

    if (flags & SLAB_RECLAIM_ALL) {
        /* Aggressive memfree */

        /* Destroy CPU magazines */
        for (i = 0; i < config.cpu_count; i++) {
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
        }
    }
    /* Destroy full magazines */
    cur = cache->magazines.prev;
    while (cur != &cache->magazines) {
        mag = list_get_instance(cur, slab_magazine_t, link);

        cur = cur->prev;
        list_remove(cur->next);
        frames += magazine_destroy(cache, mag);
        /* If we do not do full reclaim, break
         * as soon as something is freed */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    spinlock_unlock(&cache->lock);
    for (i = 0; i < config.cpu_count; i++)
        spinlock_unlock(&cache->mag_cache[i].lock);

    return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    spinlock_lock(&slab_cache_lock);
    list_remove(&cache->link);
    spinlock_unlock(&slab_cache_lock);

    free(cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);

    if (!result) {
        spinlock_lock(&cache->lock);
        result = slab_obj_create(cache, flags);
        spinlock_unlock(&cache->lock);
    }

    if (result)
        atomic_inc(&cache->allocated_objs);

    interrupts_restore(ipl);

    return result;
}

/** Return object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
        || magazine_obj_put(cache, obj)) {

        spinlock_lock(&cache->lock);
        slab_obj_destroy(cache, obj, NULL);
        spinlock_unlock(&cache->lock);
    }
    atomic_dec(&cache->allocated_objs);
    interrupts_restore(ipl);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;

    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tOcnt\tSlabs\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE);

    /* Initialize structures for malloc */
}