Subversion Repositories HelenOS


--- Rev 759
+++ Rev 762
Line 30... Line 30...
 #include <mm/slab.h>
 #include <list.h>
 #include <memstr.h>
 #include <align.h>
 #include <mm/heap.h>
+#include <mm/frame.h>
 #include <config.h>
 #include <print.h>
 #include <arch.h>
 #include <panic.h>
+#include <debug.h>
 
 SPINLOCK_INITIALIZE(slab_cache_lock);
 LIST_INITIALIZE(slab_cache_list);
 
 slab_cache_t mag_cache;
 
+
+typedef struct {
+    slab_cache_t *cache; /**< Pointer to parent cache */
+    link_t link;         /**< List of full/partial slabs */
+    void *start;         /**< Start address of first available item */
+    count_t available;   /**< Count of available items in this slab */
+    index_t nextavail;   /**< The index of next available item */
+} slab_t;
+
 /**************************************/
+/* SLAB allocation functions          */
+
+/**
+ * Allocate frames for slab space and initialize
+ *
+ * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
+ */
+static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
+{
+    void *data;
+    slab_t *slab;
+    size_t fsize;
+    int i;
+    zone_t *zone = NULL;
+    int status;
+
+    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
+    if (status != FRAME_OK)
+        return NULL;
+
+    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
+        slab = malloc(sizeof(*slab)); // , flags);
+        if (!slab) {
+            frame_free((__address)data);
+            return NULL;
+        }
+    } else {
+        fsize = (PAGE_SIZE << cache->order);
+        slab = data + fsize - sizeof(*slab);
+    }
+
+    /* Fill in slab structures */
+    /* TODO: some better way of accessing the frame, although
+     * the optimizer might optimize the division out :-/ */
+    for (i = 0; i < (1 << cache->order); i++) {
+        ADDR2FRAME(zone, (__address)(data + i*PAGE_SIZE))->parent = slab;
+    }
+
+    slab->start = data;
+    slab->available = cache->objects;
+    slab->nextavail = 0;
+
+    for (i = 0; i < cache->objects; i++)
+        *((int *)(slab->start + i*cache->size)) = i + 1;
+    return slab;
+}
+
+/**
+ * Free space associated with SLAB
+ *
+ * @return number of freed frames
+ */
+static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
+{
+    frame_free((__address)slab->start);
+    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
+        free(slab);
+    return 1 << cache->order;
+}
+
+/** Map object to slab structure */
+static slab_t * obj2slab(void *obj)
+{
+    frame_t *frame;
+
+    frame = frame_addr2frame((__address)obj);
+    return (slab_t *)frame->parent;
+}
+
+/**************************************/
-/* SLAB low level functions */
+/* SLAB functions */
 
 
 /**
  * Return object to slab and call a destructor
  *
+ * Assume the cache->lock is held;
+ *
+ * @param slab If the caller knows directly the slab of the object, otherwise NULL
+ *
  * @return Number of freed pages
  */
-static count_t slab_obj_destroy(slab_cache_t *cache, void *obj)
+static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
+                                slab_t *slab)
 {
+    count_t frames = 0;
+
-    return 0;
+    if (!slab)
+        slab = obj2slab(obj);
+
+    spinlock_lock(&cache->lock);
+
+    *((int *)obj) = slab->nextavail;
+    slab->nextavail = (obj - slab->start) / cache->size;
+    slab->available++;
+
+    /* Move it to correct list */
+    if (slab->available == 1) {
+        /* It was in full, move to partial */
+        list_remove(&slab->link);
+        list_prepend(&cache->partial_slabs, &slab->link);
+    }
+    if (slab->available == cache->objects) {
+        /* Free associated memory */
+        list_remove(&slab->link);
+        /* Avoid deadlock */
+        spinlock_unlock(&cache->lock);
+        frames = slab_space_free(cache, slab);
+        spinlock_lock(&cache->lock);
+    }
-}
+
+    spinlock_unlock(&cache->lock);
+
+    return frames;
+}
 
 /**
  * Take new object from slab or create new if needed
  *
+ * Assume cache->lock is held.
+ *
  * @return Object address or NULL
  */
 static void * slab_obj_create(slab_cache_t *cache, int flags)
 {
+    slab_t *slab;
+    void *obj;
+
+    if (list_empty(&cache->partial_slabs)) {
+        /* Allow recursion and reclaiming
+         * - this should work, as the SLAB control structures
+         *   are small and do not need to allocate with anything
+         *   other than frame_alloc when they are allocating,
+         *   that's why we should get recursion at most 1 level deep
+         */
+        spinlock_unlock(&cache->lock);
+        slab = slab_space_alloc(cache, flags);
+        spinlock_lock(&cache->lock);
+        if (!slab)
-    return NULL;
+            return NULL;
+    } else {
+        slab = list_get_instance(cache->partial_slabs.next,
+                                 slab_t,
+                                 link);
+        list_remove(&slab->link);
+    }
+    obj = slab->start + slab->nextavail * cache->size;
+    slab->nextavail = *((int *)obj);
+    slab->available--;
+    if (!slab->available)
+        list_prepend(&cache->full_slabs, &slab->link);
+    else
+        list_prepend(&cache->partial_slabs, &slab->link);
+    return obj;
 }
 
 /**************************************/
 /* CPU-Cache slab functions */
 
 /**
  * Free all objects in magazine and free memory associated with magazine
  *
- * Assume cpu->lock is locked
+ * Assume mag_cache[cpu].lock is locked
  *
  * @return Number of freed pages
  */
 static count_t magazine_destroy(slab_cache_t *cache,
                                 slab_magazine_t *mag)
 {
     int i;
     count_t frames = 0;
 
     for (i = 0; i < mag->busy; i++)
-        frames += slab_obj_destroy(cache, mag->objs[i]);
+        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
 
     slab_free(&mag_cache, mag);
 
     return frames;
 }
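The rewritten allocator keeps free objects in an index-chained free list: slab_space_alloc() stores in the first word of every free slot the index of the next free slot, slab_obj_create() pops the head via nextavail, and slab_obj_destroy() pushes the slot back. A standalone user-space sketch of the same mechanism (all demo_* names and the fixed sizes are hypothetical, not part of this revision):

    #include <stdio.h>
    #include <stdlib.h>

    #define OBJ_SIZE 32   /* assumed object size */
    #define OBJECTS  8    /* assumed objects per slab */

    typedef struct {
        void *start;      /* first object, like slab_t.start */
        int nextavail;    /* index of the next free object */
        int available;    /* count of free objects */
    } demo_slab_t;

    static void demo_slab_init(demo_slab_t *s)
    {
        int i;

        s->start = malloc(OBJ_SIZE * OBJECTS);
        s->nextavail = 0;
        s->available = OBJECTS;
        /* Chain slot i to slot i + 1, as slab_space_alloc() does */
        for (i = 0; i < OBJECTS; i++)
            *((int *)((char *)s->start + i * OBJ_SIZE)) = i + 1;
    }

    static void *demo_alloc(demo_slab_t *s)
    {
        void *obj;

        if (!s->available)
            return NULL;
        obj = (char *)s->start + s->nextavail * OBJ_SIZE;
        s->nextavail = *((int *)obj);   /* pop the free-list head */
        s->available--;
        return obj;
    }

    static void demo_free(demo_slab_t *s, void *obj)
    {
        *((int *)obj) = s->nextavail;   /* push back onto the free list */
        s->nextavail = ((char *)obj - (char *)s->start) / OBJ_SIZE;
        s->available++;
    }

    int main(void)
    {
        demo_slab_t s;
        void *a;

        demo_slab_init(&s);
        a = demo_alloc(&s);
        demo_free(&s, a);
        printf("LIFO reuse: %d\n", demo_alloc(&s) == a);   /* prints 1 */
        free(s.start);
        return 0;
    }

The trick costs nothing for live objects: the first word of a slot doubles as link storage only while the slot is free, which is also why an object must be at least sizeof(int) bytes.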
Line 113... Line 256...
             cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
             cache->mag_cache[CPU->id].last = mag;
             mag = cache->mag_cache[CPU->id].current;
             goto gotit;
         }
-        /* If still not busy, exchange current with some frome
+        /* If still not busy, exchange current with some from
          * other full magazines */
         spinlock_lock(&cache->lock);
         if (list_empty(&cache->magazines)) {
             spinlock_unlock(&cache->lock);
             goto out;
Line 159... Line 302...
     mag = cache->mag_cache[CPU->id].current;
     if (!mag) {
         /* We do not want to sleep just because of caching */
         /* Especially we do not want reclaiming to start, as
          * this would deadlock */
-        mag = slab_alloc(&mag_cache, SLAB_ATOMIC | SLAB_NO_RECLAIM);
+        mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
         if (!mag) /* Allocation failed, give up on caching */
             goto errout;
 
         cache->mag_cache[CPU->id].current = mag;
         mag->size = SLAB_MAG_SIZE;
Line 173... Line 316...
         mag = cache->mag_cache[CPU->id].last;
         if (!mag || mag->size == mag->busy) {
             if (mag)
                 list_prepend(&cache->magazines, &mag->link);
 
-            mag = slab_alloc(&mag_cache, SLAB_ATOMIC | SLAB_NO_RECLAIM);
+            mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
             if (!mag)
                 goto errout;
 
             mag->size = SLAB_MAG_SIZE;
             mag->busy = 0;
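Both hunks sit inside the CPU-magazine layer: every CPU owns a current and a last magazine, and replacement magazines are now allocated with FRAME_ATOMIC | FRAME_NO_RECLAIM so that caching can neither sleep nor recursively trigger reclaiming. A minimal model of the two fast paths (the demo_* types are hypothetical; the real slab_magazine_t also carries a link_t for the shared cache->magazines list):

    #define DEMO_MAG_SIZE 4              /* stands in for SLAB_MAG_SIZE */

    typedef struct {
        int busy;                        /* objects currently cached */
        int size;                        /* capacity of objs[] */
        void *objs[DEMO_MAG_SIZE];
    } demo_magazine_t;

    /* Fast path of magazine_obj_get(): pop from the CPU's current
     * magazine; on failure the caller swaps in 'last' or a full
     * magazine from the shared list before falling back to the slab. */
    static void *demo_mag_get(demo_magazine_t *current)
    {
        if (current && current->busy)
            return current->objs[--current->busy];
        return NULL;
    }

    /* Fast path of magazine_obj_put(): push into the current magazine;
     * when it is full the caller parks it on the shared list. */
    static int demo_mag_put(demo_magazine_t *current, void *obj)
    {
        if (current && current->busy < current->size) {
            current->objs[current->busy++] = obj;
            return 0;
        }
        return -1;                       /* caller frees straight to the slab */
    }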
Line 196... Line 339...
     return -1;
 }
 
 
 /**************************************/
-/* Top level SLAB functions */
+/* SLAB CACHE functions */
+
+/** Return number of objects that fit in certain cache size */
+static int comp_objects(slab_cache_t *cache)
+{
+    if (cache->flags & SLAB_CACHE_SLINSIDE)
+        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
+    else
+        return (PAGE_SIZE << cache->order) / cache->size;
+}
+
+/** Return wasted space in slab */
+static int badness(slab_cache_t *cache)
+{
+    int objects;
+    int ssize;
+
+    objects = comp_objects(cache);
+    ssize = PAGE_SIZE << cache->order;
+    if (cache->flags & SLAB_CACHE_SLINSIDE)
+        ssize -= sizeof(slab_t);
+    return ssize - objects*cache->size;
+}
 
 /** Initialize allocated memory as a slab cache */
 static void
 _slab_cache_create(slab_cache_t *cache,
                    char *name,
Line 212... Line 377...
 {
     int i;
 
     memsetb((__address)cache, sizeof(*cache), 0);
     cache->name = name;
-    cache->align = align;
 
+    if (align)
-    cache->size = ALIGN_UP(size, align);
+        size = ALIGN_UP(size, align);
+    cache->size = size;
 
     cache->constructor = constructor;
     cache->destructor = destructor;
     cache->flags = flags;
 
Line 234... Line 400...
 
     /* Compute slab sizes, object counts in slabs etc. */
     if (cache->size < SLAB_INSIDE_SIZE)
         cache->flags |= SLAB_CACHE_SLINSIDE;
 
+    /* Minimum slab order */
+    cache->order = (cache->size / PAGE_SIZE) + 1;
 
+    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
+        cache->order += 1;
+    }
+
+    cache->objects = comp_objects(cache);
 
     spinlock_lock(&slab_cache_lock);
 
     list_append(&cache->link, &slab_cache_list);
 
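The sizing pass above first picks the smallest order that can hold one object and then grows it until badness(), the unused tail of the slab, falls below SLAB_MAX_BADNESS(cache). A worked example under assumed numbers (PAGE_SIZE of 4096, a 2500-byte object, slab_t kept outside the slab):

    #include <stdio.h>

    #define PAGE_SIZE 4096

    int main(void)
    {
        int size = 2500;                       /* assumed object size */
        int order = (size / PAGE_SIZE) + 1;    /* minimum order: 1 */
        int ssize = PAGE_SIZE << order;        /* 8192-byte slab */
        int objects = ssize / size;            /* 3 objects fit */
        int badness = ssize - objects * size;  /* 8192 - 7500 = 692 wasted */

        /* The kernel would keep incrementing 'order' while badness
         * still exceeded SLAB_MAX_BADNESS(cache). */
        printf("order=%d objects=%d badness=%d\n", order, objects, badness);
        return 0;
    }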
Line 264... Line 437...
 /**
  * Reclaim space occupied by objects that are already free
  *
  * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
  * @return Number of freed pages
+ *
+ * TODO: Add light reclaim
  */
 static count_t _slab_reclaim(slab_cache_t *cache, int flags)
 {
     int i;
     slab_magazine_t *mag;
Line 281... Line 456...
     for (i = 0; i < config.cpu_count; i++)
         spinlock_lock(&cache->mag_cache[i].lock);
     spinlock_lock(&cache->lock);
 
     if (flags & SLAB_RECLAIM_ALL) {
+        /* Aggressive memfree */
+
         /* Destroy CPU magazines */
         for (i = 0; i < config.cpu_count; i++) {
             mag = cache->mag_cache[i].current;
             if (mag)
                 frames += magazine_destroy(cache, mag);
Line 293... Line 470...
             mag = cache->mag_cache[i].last;
             if (mag)
                 frames += magazine_destroy(cache, mag);
             cache->mag_cache[i].last = NULL;
         }
+    }
-        /* Destroy full magazines */
+    /* Destroy full magazines */
-        cur = cache->magazines.next;
+    cur = cache->magazines.prev;
-        while (cur != &cache->magazines) {
+    while (cur != &cache->magazines) {
-            mag = list_get_instance(cur, slab_magazine_t, link);
+        mag = list_get_instance(cur, slab_magazine_t, link);
 
-            cur = cur->next;
+        cur = cur->prev;
-            list_remove(cur->prev);
+        list_remove(cur->next);
-            frames += magazine_destroy(cache, mag);
+        frames += magazine_destroy(cache, mag);
+        /* If we do not do full reclaim, break
+         * as soon as something is freed */
+        if (!(flags & SLAB_RECLAIM_ALL) && frames)
-        }
+            break;
     }
 
     spinlock_unlock(&cache->lock);
     for (i = 0; i < config.cpu_count; i++)
         spinlock_unlock(&cache->mag_cache[i].lock);
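The rewritten reclaim loop walks the magazine list from the tail and advances the cursor before unlinking the node it just visited, the standard way to delete entries while traversing a circular doubly linked list; it also lets a light (non-SLAB_RECLAIM_ALL) reclaim stop after the first freed magazine. The traversal pattern in isolation (hypothetical minimal list type):

    typedef struct demo_link {
        struct demo_link *prev, *next;
    } demo_link_t;

    static void demo_list_remove(demo_link_t *l)
    {
        l->prev->next = l->next;
        l->next->prev = l->prev;
    }

    /* Step off a node before unlinking it, so the cursor never
     * points into memory that the destroy step may free. */
    static void demo_purge(demo_link_t *head)
    {
        demo_link_t *cur = head->prev;   /* walk backwards, as above */

        while (cur != head) {
            demo_link_t *victim = cur;

            cur = cur->prev;
            demo_list_remove(victim);
            /* destroy the entry containing 'victim' here */
        }
    }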
Line 345... Line 526...
     ipl = interrupts_disable();
 
     if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
         result = magazine_obj_get(cache);
 
-    if (!result)
+    if (!result) {
+        spinlock_lock(&cache->lock);
         result = slab_obj_create(cache, flags);
+        spinlock_unlock(&cache->lock);
+    }
 
     interrupts_restore(ipl);
 
     return result;
 }
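With the locking moved out of slab_obj_create(), slab_alloc() serves most requests from the per-CPU magazine and takes cache->lock only on the slow path. Hypothetical client code for the resulting API; the demo_* names are invented and the parameter order of _slab_cache_create() is inferred from the fields it initializes above (cache, name, size, align, constructor, destructor, flags):

    typedef struct {
        int id;
        char payload[60];
    } demo_obj_t;

    static slab_cache_t demo_cache;

    static void demo_init(void)
    {
        /* align == 0 skips the ALIGN_UP() step in _slab_cache_create() */
        _slab_cache_create(&demo_cache, "demo_obj", sizeof(demo_obj_t), 0,
                           NULL /* constructor */, NULL /* destructor */, 0);
    }

    static void demo_use(void)
    {
        demo_obj_t *obj = slab_alloc(&demo_cache, 0);

        if (obj)
            slab_free(&demo_cache, obj);
    }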
Line 360... Line 544...
 {
     ipl_t ipl;
 
     ipl = interrupts_disable();
 
-    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
-        slab_obj_destroy(cache, obj);
-    else {
-        if (magazine_obj_put(cache, obj)) /* If magazine put failed */
-            slab_obj_destroy(cache, obj);
+    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
+        || magazine_obj_put(cache, obj)) {
+
+        spinlock_lock(&cache->lock);
+        slab_obj_destroy(cache, obj, NULL);
+        spinlock_unlock(&cache->lock);
     }
     interrupts_restore(ipl);
 }
 
 /* Go through all caches and reclaim what is possible */
Line 396... Line 581...
 {
     slab_cache_t *cache;
     link_t *cur;
 
     spinlock_lock(&slab_cache_lock);
-    printf("SLAB name\tObj size\n");
+    printf("SLAB name\tOsize\tOrder\n");
     for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
         cache = list_get_instance(cur, slab_cache_t, link);
-        printf("%s\t%d\n", cache->name, cache->size);
+        printf("%s\t%d\t%d\n", cache->name, cache->size, cache->order);
     }
     spinlock_unlock(&slab_cache_lock);
 }
 
 void slab_cache_init(void)