Subversion Repositories HelenOS-historic


Rev 766 → Rev 767 (lines added in Rev 767 are marked with "+", removed lines with "-")
/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
LIST_INITIALIZE(slab_cache_list);

slab_cache_t mag_cache;


typedef struct {
    slab_cache_t *cache; /**< Pointer to parent cache */
    link_t link;         /**< Link to the full/partial slab lists */
    void *start;         /**< Start address of first available item */
    count_t available;   /**< Count of available items in this slab */
    index_t nextavail;   /**< Index of the next available item */
} slab_t;
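
Free objects inside a slab form an index-linked freelist: the first int of each free object stores the index of the next free one, and nextavail holds the head. A minimal, self-contained sketch of that encoding (object count and size are hypothetical):

    #define OBJ_SIZE 16
    #define OBJS      4
    static char space[OBJS * OBJ_SIZE];    /* stands in for slab->start     */
    static int  head;                      /* stands in for slab->nextavail */

    static void freelist_init(void)       /* cf. the init loop in slab_space_alloc() */
    {
        int i;
        for (i = 0; i < OBJS; i++)
            *((int *)(space + i*OBJ_SIZE)) = i + 1;
        head = 0;
    }

    static void *freelist_get(void)       /* cf. slab_obj_create() */
    {
        void *obj = space + head*OBJ_SIZE;
        head = *((int *)obj);              /* pop: next index was stored in obj */
        return obj;
    }

    static void freelist_put(void *obj)   /* cf. slab_obj_destroy() */
    {
        *((int *)obj) = head;              /* push: remember old head in obj */
        head = ((char *)obj - space) / OBJ_SIZE;
    }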

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize the slab structure.
 *
 * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = malloc(sizeof(*slab)); // , flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i = 0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data + i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
+    slab->cache = cache;

    for (i = 0; i < cache->objects; i++)
        *((int *)(slab->start + i*cache->size)) = i + 1;

    atomic_inc(&cache->allocated_slabs);

    return slab;
}
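
With SLAB_CACHE_SLINSIDE the slab_t header is carved out of the end of the frame block itself. A worked layout under assumed numbers (PAGE_SIZE = 4096, order = 0, sizeof(slab_t) = 32):

    /* fsize = PAGE_SIZE << 0               = 4096
     * slab  = data + fsize - sizeof(*slab) = data + 4064
     *
     *   data                           data+4064       data+4096
     *    |  object space (4064 bytes)     | slab_t header |
     *    +--------------------------------+---------------+
     *
     * comp_objects() below divides exactly this reduced space.
     */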

/**
 * Deallocate the space associated with a slab.
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
        free(slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}
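
This lookup relies on slab_space_alloc() having set frame->parent on every frame of the slab, so any address within the slab's pages resolves to the same header. A hedged illustration (assumes both allocations land in the same, fresh slab):

    void *a = slab_obj_create(cache, 0);
    void *b = slab_obj_create(cache, 0);
    /* both addresses lie inside the same frame block, so: */
    ASSERT(obj2slab(a) == obj2slab(b));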

/**************************************/
/* SLAB functions */


/**
 * Return an object to its slab and call the destructor.
 *
 * Assumes cache->lock is held.
 *
 * @param slab The object's slab, if the caller knows it directly; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    count_t frames = 0;

    if (!slab)
        slab = obj2slab(obj);

+    ASSERT(slab->cache == cache);

    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        /* Avoid deadlock */
        spinlock_unlock(&cache->lock);
        frames = slab_space_free(cache, slab);
        spinlock_lock(&cache->lock);
    }

    return frames;
}
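
The ASSERT added in rev 767 traps objects returned to a cache they did not come from, which the new slab->cache back-pointer makes checkable. A hypothetical misuse it would catch, once the object falls through the magazine layer to slab_obj_destroy() (cache_a and cache_b are made-up names):

    void *obj = slab_alloc(cache_a, 0);
    slab_free(cache_b, obj);   /* obj's slab->cache is cache_a, not cache_b:
                                * ASSERT(slab->cache == cache) fires        */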

/**
 * Take a new object from a slab, creating a new slab if needed.
 *
 * Assumes cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why recursion is at most 1 level deep
         */
        spinlock_unlock(&cache->lock);
        slab = slab_space_alloc(cache, flags);
        spinlock_lock(&cache->lock);
        if (!slab) {
            return NULL;
        }
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                     slab_t,
                     link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in a magazine and free the memory associated with it.
 *
 * Assumes mag_cache[cpu].lock is locked.
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

-    for (i=0;i < mag->busy; i++)
+    for (i=0;i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
+        atomic_dec(&cache->cached_objs);
+    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Try to find an object in the CPU-cache magazines.
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
+    void *obj;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = cache->mag_cache[CPU->id].current;
    if (!mag)
        goto out;

    if (!mag->busy) {
        /* If current is empty && last exists && not empty, exchange */
        if (cache->mag_cache[CPU->id].last \
            && cache->mag_cache[CPU->id].last->busy) {
            cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
            cache->mag_cache[CPU->id].last = mag;
            mag = cache->mag_cache[CPU->id].current;
            goto gotit;
        }
        /* If still not busy, exchange current with some from
         * other full magazines */
        spinlock_lock(&cache->lock);
        if (list_empty(&cache->magazines)) {
            spinlock_unlock(&cache->lock);
            goto out;
        }
        /* Free current magazine and take one from list */
        slab_free(&mag_cache, mag);
        mag = list_get_instance(cache->magazines.next,
                    slab_magazine_t,
                    link);
        list_remove(&mag->link);

        spinlock_unlock(&cache->lock);
    }
gotit:
+    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+    atomic_dec(&cache->cached_objs);
-    return mag->objs[--mag->busy];
+    return obj;
out:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return NULL;
}
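
Note the reshuffle at gotit: rev 766 evaluated mag->objs[--mag->busy] after dropping the per-CPU lock, so a concurrent _slab_reclaim(), which takes every mag_cache lock and may slab_free() the magazine, could race with that read. Rev 767 pops under the lock and only then unlocks. Side by side (condensed sketch, not the repository's literal text):

    /* rev 766 (racy):                      rev 767 (fixed):
     *   unlock(cpu lock);                    obj = mag->objs[--mag->busy];
     *   return mag->objs[--mag->busy];       unlock(cpu lock);
     *                                        atomic_dec(&cache->cached_objs);
     *                                        return obj;
     */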

/**
 * Put an object into the CPU-cache magazine.
 *
 * We have 2 magazines bound to each processor.
 * First try the current one.
 *  If it is full, try the last one.
 *   If that is full too, put it on the magazines list,
 *   allocate a new one and exchange last & current.
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = cache->mag_cache[CPU->id].current;
    if (!mag) {
        /* We do not want to sleep just because of caching */
        /* Especially we do not want reclaiming to start, as
         * this would deadlock */
        mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
        if (!mag) /* Allocation failed, give up on caching */
            goto errout;

        cache->mag_cache[CPU->id].current = mag;
        mag->size = SLAB_MAG_SIZE;
        mag->busy = 0;
    } else if (mag->busy == mag->size) {
        /* If the last is full or empty, allocate new */
        mag = cache->mag_cache[CPU->id].last;
        if (!mag || mag->size == mag->busy) {
            if (mag)
                list_prepend(&mag->link, &cache->magazines);

            mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
            if (!mag)
                goto errout;

            mag->size = SLAB_MAG_SIZE;
            mag->busy = 0;
            cache->mag_cache[CPU->id].last = mag;
        }
        /* Exchange the 2 */
        cache->mag_cache[CPU->id].last = cache->mag_cache[CPU->id].current;
        cache->mag_cache[CPU->id].current = mag;
    }
    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
+    atomic_inc(&cache->cached_objs);
    return 0;
errout:
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    return -1;
}
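
Rev 767 also starts maintaining cached_objs across the magazine layer: incremented here when an object is parked, decremented in magazine_obj_get() and magazine_destroy(). The intended bookkeeping, as a sketch:

    /* allocated_slabs * cache->objects   total objects backed by frames
     * allocated_objs                     objects currently handed out
     * cached_objs                        objects parked in magazines
     *
     * objects still on slab freelists =
     *     allocated_slabs*cache->objects - allocated_objs - cached_objs
     */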


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into one slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
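
A worked example with assumed numbers (PAGE_SIZE = 4096, sizeof(slab_t) = 32, cache->size = 96, order = 0, SLINSIDE set):

    /* comp_objects() = (4096 - 32) / 96    = 42 objects per slab
     * badness()      = (4096 - 32) - 42*96 = 4064 - 4032 = 32 bytes wasted
     *
     * _slab_cache_create() below grows cache->order until
     * badness(cache) <= SLAB_MAX_BADNESS(cache).
     */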

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
           char *name,
           size_t size,
           size_t align,
           int (*constructor)(void *obj, int kmflag),
           void (*destructor)(void *obj),
           int flags)
{
    int i;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->lock, "cachelock");
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        for (i = 0; i < config.cpu_count; i++)
            spinlock_initialize(&cache->mag_cache[i].lock,
                        "cpucachelock");
    }

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    cache->order = (cache->size-1) >> PAGE_WIDTH;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If the slab info fits into the wasted space, keep it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 void (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0]));
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}
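
A hedged usage sketch of the public API (the type, names and sizes are made up; constructor and destructor are optional and left NULL):

    typedef struct { int x, y; } point_t;
    static slab_cache_t *point_cache;

    void point_demo(void)
    {
        point_t *p;

        point_cache = slab_cache_create("point_t", sizeof(point_t), 0,
                                        NULL, NULL, 0);
        p = slab_alloc(point_cache, 0);   /* no flags given: always returns */
        p->x = p->y = 0;
        slab_free(point_cache, p);        /* parked in a magazine or returned */
    }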

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 *
 * TODO: Add light reclaim
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    link_t *cur;
    count_t frames = 0;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* First lock all cpu caches, then the complete cache lock */
    for (i = 0; i < config.cpu_count; i++)
        spinlock_lock(&cache->mag_cache[i].lock);
    spinlock_lock(&cache->lock);

    if (flags & SLAB_RECLAIM_ALL) {
        /* Aggressive memfree */
        /* Destroy CPU magazines */
        for (i = 0; i < config.cpu_count; i++) {
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
        }
    }
    /* Destroy full magazines */
    cur = cache->magazines.prev;

    while (cur != &cache->magazines) {
        mag = list_get_instance(cur, slab_magazine_t, link);

        cur = cur->prev;
        list_remove(cur->next);
+//      list_remove(&mag->link);
        frames += magazine_destroy(cache, mag);
        /* If we do not do full reclaim, break
         * as soon as something is freed */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    spinlock_unlock(&cache->lock);
    for (i = 0; i < config.cpu_count; i++)
        spinlock_unlock(&cache->mag_cache[i].lock);

    return frames;
}
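
The backward walk above steps cur to its predecessor before unlinking and destroying the node it just visited, so the iterator never dereferences freed memory. The pattern in isolation (item_t and destroy() are hypothetical):

    link_t *cur = list->prev;          /* start from the tail      */
    while (cur != list) {
        item_t *it = list_get_instance(cur, item_t, link);
        cur = cur->prev;               /* step away first ...      */
        list_remove(cur->next);        /* ... then unlink the node */
        destroy(it);                   /* safe: iterator moved on  */
    }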

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs) \
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    spinlock_lock(&slab_cache_lock);
    list_remove(&cache->link);
    spinlock_unlock(&slab_cache_lock);

    free(cache);
}

/** Allocate a new object from the cache; if no flags are given, it always
    returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);

    if (!result) {
        spinlock_lock(&cache->lock);
        result = slab_obj_create(cache, flags);
        spinlock_unlock(&cache->lock);
    }

    if (result)
        atomic_inc(&cache->allocated_objs);

    interrupts_restore(ipl);

    return result;
}

/** Return object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
        || magazine_obj_put(cache, obj)) {

        spinlock_lock(&cache->lock);
        slab_obj_destroy(cache, obj, NULL);
        spinlock_unlock(&cache->lock);
    }
    atomic_dec(&cache->allocated_objs);
    interrupts_restore(ipl);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print the list of slab caches */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;

    spinlock_lock(&slab_cache_lock);
-    printf("SLAB name\tOsize\tPages\tOcnt\tSlabs\tAllocobjs\tCtl\n");
+    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
-        printf("%s\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
+        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
+               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t) + SLAB_MAG_SIZE*sizeof(void*),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE);

    /* Initialize structures for malloc */
}