Subversion Repositories HelenOS-historic

Diff between Rev 789 and Rev 791. Lines prefixed with '-' appear only in Rev 789, lines prefixed with '+' appear only in Rev 791; all other lines are common to both revisions.
/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
 
/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 *   - empty SLABS are deallocated immediately
 *     (in Linux they are kept on a linked list, in Solaris ???)
 *   - empty magazines are deallocated when not needed
 *     (in Solaris they are held on a linked list in the slab cache)
 *
 *   The following features are not currently supported but would be easy to do:
 *   - cache coloring
 *   - dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first checked whether it is
 * available in the CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared SLAB - if a partially full one is found, it
 * is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound
 * magazine. If there is no such magazine, a new one is allocated (if the
 * allocation fails, the object is deallocated into the SLAB). If the
 * magazine is full, it is put onto the CPU-shared list of magazines and
 * a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines, to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *  
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty SLABS are immediately freed (thrashing will be avoided
 * because of magazines).
 *
 * The SLAB information structure is kept inside the data area, if possible.
 * A cache can be marked so that it should not use magazines. This is used
 * only for SLAB-related caches, to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list, until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * TODO: For better CPU-scaling the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached mag. cache). This would provide a nice per-CPU
 * buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases competition for the 1 per-system
 * magazine cache.
 *
 * - it might be good to add granularity of locks even at the slab level;
 *   we could then try_spinlock over all partial slabs and thus improve
 *   scalability even at the slab level
 */
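
/*
 * A minimal usage sketch of the cache API described above. The object type
 * example_t and its callbacks are illustrative assumptions, not part of
 * this allocator; the block is compiled out.
 */
#if 0
typedef struct {
    int value;
} example_t;

static int example_ctor(void *obj, int kmflag)
{
    ((example_t *) obj)->value = 0;
    return 0;               /* 0 signals successful construction */
}

static int example_dtor(void *obj)
{
    return 0;               /* Number of frames freed by the destructor */
}

static void example_usage(void)
{
    slab_cache_t *cache;
    example_t *e;

    /* One cache per object type; align 0 lets the allocator
     * round up to the native word size. */
    cache = slab_cache_create("example_t", sizeof(example_t), 0,
                  example_ctor, example_dtor, 0);

    e = slab_alloc(cache, 0);   /* First tries the CPU magazine, then a slab */
    slab_free(cache, e);        /* Goes back into the CPU-bound magazine */

    slab_cache_destroy(cache);  /* Legal only when all objects are freed */
}
#endif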
 
 
#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
 
SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);
 
/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
-/** Cache for magcache structure from cache_t */
-static slab_cache_t *cpu_cache = NULL;
/** Cache for external slab descriptors
 * This time we want per-cpu cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] =  {
-    "malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
+    "malloc-16","malloc-32","malloc-64","malloc-128",
    "malloc-256","malloc-512","malloc-1K","malloc-2K",
    "malloc-4K","malloc-8K","malloc-16K","malloc-32K",
    "malloc-64K","malloc-128K"
};
 
/** Slab descriptor */
typedef struct {
    slab_cache_t *cache; /**< Pointer to parent cache */
    link_t link;         /**< Link to the list of full/partial slabs */
    void *start;         /**< Start address of first available item */
    count_t available;   /**< Count of available items in this slab */
    index_t nextavail;   /**< The index of next available item */
} slab_t;
 
+#ifdef CONFIG_DEBUG
+static int _slab_initialized = 0;
+#endif
+
/**************************************/
/* SLAB allocation functions          */
 
/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;
 
    data = (void *)frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }
        
    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i=0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
        frame->parent = slab;
    }
 
    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;
 
    for (i=0; i<cache->objects;i++)
        *((int *) (slab->start + i*cache->size)) = i+1;
 
    atomic_inc(&cache->allocated_slabs);
    return slab;
}
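
/*
 * The last loop above threads the initial free list through the unused
 * objects themselves: slot i stores the index i+1 of the next free slot.
 * With illustrative numbers (a 4096-byte slab, cache->size == 1024,
 * cache->objects == 4), a fresh slab looks like
 *
 *    slot 0: 1,  slot 1: 2,  slot 2: 3,  slot 3: 4
 *
 * and slab->nextavail == 0, so allocations pop slots 0, 1, 2, 3 in order.
 * The final index points one past the last slot, but it is never followed,
 * because slab->available reaches 0 first. No bookkeeping memory outside
 * the slab is needed; a slot holds either user data or a next-free index,
 * never both at once.
 */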
 
/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);
 
    atomic_dec(&cache->allocated_slabs);
    
    return 1 << cache->order;
}
 
/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;
 
    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}
 
/**************************************/
/* SLAB functions */
 
 
/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known directly by the caller; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    int freed = 0;
 
    if (!slab)
        slab = obj2slab(obj);
 
    ASSERT(slab->cache == cache);
 
    if (cache->destructor)
        freed = cache->destructor(obj);
    
    spinlock_lock(&cache->slablock);
    ASSERT(slab->available < cache->objects);
 
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;
 
    /* Move it to correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        spinlock_unlock(&cache->slablock);
 
        return freed + slab_space_free(cache, slab);
 
    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    spinlock_unlock(&cache->slablock);
    return freed;
}
 
/**
 * Take a new object from a slab or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;
 
    spinlock_lock(&cache->slablock);
 
    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why we should get recursion at most 1-level deep
         */
        spinlock_unlock(&cache->slablock);
        slab = slab_space_alloc(cache, flags);
        if (!slab)
            return NULL;
        spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                     slab_t,
                     link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
 
    if (! slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);
 
    spinlock_unlock(&cache->slablock);
 
    if (cache->constructor && cache->constructor(obj, flags)) {
        /* Bad, bad, construction failed */
        slab_obj_destroy(cache, obj, slab);
        return NULL;
    }
    return obj;
}
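
/*
 * The free-list pop in slab_obj_create(), continued from the example at
 * slab_space_alloc() (illustrative: cache->size == 1024, nextavail == 2):
 *
 *    obj = slab->start + 2 * 1024;        <- address of slot 2
 *    slab->nextavail = *((int *) obj);    <- 3, stored at init/free time
 *
 * slab_obj_destroy() is the exact inverse: it stores the old nextavail
 * into the freed object and recovers the slot index as
 * (obj - slab->start) / cache->size.
 */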
 
/**************************************/
/* CPU-Cache slab functions */
 
/**
 * Find a full magazine in the cache, take it from the list
 * and return it
 *
 * @param first If true, return the first magazine on the list, else the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
                        int first)
{
    slab_magazine_t *mag = NULL;
    link_t *cur;
 
    spinlock_lock(&cache->maglock);
    if (!list_empty(&cache->magazines)) {
        if (first)
            cur = cache->magazines.next;
        else
            cur = cache->magazines.prev;
        mag = list_get_instance(cur, slab_magazine_t, link);
        list_remove(&mag->link);
        atomic_dec(&cache->magazine_counter);
    }
    spinlock_unlock(&cache->maglock);
    return mag;
}
 
/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
    spinlock_lock(&cache->maglock);
 
    list_prepend(&mag->link, &cache->magazines);
    atomic_inc(&cache->magazine_counter);
    
    spinlock_unlock(&cache->maglock);
}
 
/**
 * Free all objects in the magazine and free the memory associated with it
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;
 
    for (i=0;i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }
    
    slab_free(&mag_cache, mag);
 
    return frames;
}
 
/**
 * Find a full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;
 
    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;
 
        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from magazine list */
    newmag = get_mag_from_cache(cache, 1);
    if (!newmag)
        return NULL;
 
    if (lastmag)
        magazine_destroy(cache, lastmag);
 
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}
 
/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;
 
    if (!CPU)
        return NULL;
 
    spinlock_lock(&cache->mag_cache[CPU->id].lock);
 
    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);
    
    return obj;
}
 
/**
 * Ensure that the current magazine has free slots; return a pointer to it,
 * or NULL if no such magazine is available and a new one cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to the processor.
 * First try the current.
 *  If full, try the last.
 *   If full, put it on the magazines list,
 *   allocate a new one, and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag,*lastmag,*newmag;
 
    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
 
    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current | last are full | nonexistent, allocate new */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;
 
    /* Flush last to magazine list */
    if (lastmag)
        put_mag_to_cache(cache, lastmag);
 
    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag; 
    cache->mag_cache[CPU->id].current = newmag;
 
    return newmag;
}
 
/**
 * Put an object into the CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;
 
    if (!CPU)
        return -1;
 
    spinlock_lock(&cache->mag_cache[CPU->id].lock);
 
    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }
    
    mag->objs[mag->busy++] = obj;
 
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}
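
/*
 * A concrete run of the two-magazine protocol (illustrative, with
 * SLAB_MAG_SIZE == 4): freeing 9 objects on one CPU through
 * magazine_obj_put() proceeds as
 *
 *    objs 1-4: fill 'current' ('last' does not exist yet)
 *    obj  5:   'current' is full - a fresh magazine becomes 'current',
 *              the full one becomes 'last', obj 5 is put
 *    objs 6-8: fill the new 'current'
 *    obj  9:   both are full - 'last' is flushed to cache->magazines,
 *              another fresh magazine becomes 'current', obj 9 is put
 *
 * Subsequent allocations on this CPU replay the same objects in LIFO
 * order, which is what keeps a tight alloc/free loop at the magazine
 * boundary from thrashing between the two magazines.
 */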
 
 
/**************************************/
/* SLAB CACHE functions */
 
/** Return the number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}
 
/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;
 
    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
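
/*
 * A worked example for comp_objects() and badness() (illustrative numbers:
 * PAGE_SIZE == 4096, cache->order == 0, sizeof(slab_t) == 32,
 * cache->size == 200, SLAB_CACHE_SLINSIDE set):
 *
 *    objects = (4096 - 32) / 200 = 20
 *    badness = (4096 - 32) - 20 * 200 = 64 wasted bytes per slab
 *
 * _slab_cache_create() below keeps increasing the order (doubling the
 * slab size) while this waste exceeds SLAB_MAX_BADNESS(cache).
 */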
 
/**
 * Initialize mag_cache structure in slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
    int i;
+    ASSERT(_slab_initialized >= 2);
 
-    ASSERT(cpu_cache);
-    cache->mag_cache = slab_alloc(cpu_cache, 0);
+    cache->mag_cache = kalloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
    for (i=0; i < config.cpu_count; i++) {
        memsetb((__address)&cache->mag_cache[i],
            sizeof(cache->mag_cache[i]), 0);
        spinlock_initialize(&cache->mag_cache[i].lock,
                    "slab_maglock_cpu");
    }
}
 
/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
           char *name,
           size_t size,
           size_t align,
           int (*constructor)(void *obj, int kmflag),
           int (*destructor)(void *obj),
           int flags)
{
    int pages;
    ipl_t ipl;
 
    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;
 
    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);
        
    cache->size = size;
 
    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;
 
    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->slablock, "slab_lock");
    spinlock_initialize(&cache->maglock, "slab_maglock");
    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
        make_magcache(cache);
 
    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;
 
    /* Minimum slab order */
    pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
    cache->order = fnzb(pages);
 
    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;
 
    /* Add cache to cache list */
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
 
    list_append(&cache->link, &slab_cache_list);
 
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}
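
/*
 * Order selection above, continued from the example at badness()
 * (illustrative: cache->size == 200, 4 KiB pages):
 *
 *    pages = ((200 - 1) >> PAGE_WIDTH) + 1 = 1,  order = fnzb(1) = 0
 *
 * so the cache starts with single-frame slabs; only if the waste exceeded
 * SLAB_MAX_BADNESS(cache) would the loop move to order 1 (8 KiB),
 * order 2 (16 KiB), and so on.
 */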
 
/** Create slab cache  */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 int (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;
 
    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}
 
/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    count_t frames = 0;
    int magcount;
    
    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */
 
    /* We count up to the original magazine count to avoid
     * an endless loop
     */
    magcount = atomic_get(&cache->magazine_counter);
    while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
        frames += magazine_destroy(cache,mag);
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }
    
    if (flags & SLAB_RECLAIM_ALL) {
        /* Free cpu-bound magazines */
        /* Destroy CPU magazines */
        for (i=0; i<config.cpu_count; i++) {
            spinlock_lock(&cache->mag_cache[i].lock);
 
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;
            
            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
 
            spinlock_unlock(&cache->mag_cache[i].lock);
        }
    }
 
    return frames;
}
 
/** Check that there are no slabs and remove cache from system  */
void slab_cache_destroy(slab_cache_t *cache)
{
    ipl_t ipl;
 
    /* First remove cache from link, so that we don't need
     * to disable interrupts later
     */
 
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
 
    list_remove(&cache->link);
 
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
 
    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */
    
    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);
 
    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs) \
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");
 
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
-        slab_free(cpu_cache, cache->mag_cache);
+        kfree(cache->mag_cache);
    slab_free(&slab_cache_cache, cache);
}
 
/** Allocate a new object from cache - if no flags are given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;
    
    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();
 
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);
    if (!result)
        result = slab_obj_create(cache, flags);
 
    interrupts_restore(ipl);
 
    if (result)
        atomic_inc(&cache->allocated_objs);
 
    return result;
}
 
/** Return object to cache, use slab if known  */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;
 
    ipl = interrupts_disable();
 
    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
        || magazine_obj_put(cache, obj)) {
 
        slab_obj_destroy(cache, obj, slab);
 
    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}
 
/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache,obj,NULL);
}
 
/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;
 
    spinlock_lock(&slab_cache_lock);
 
    /* TODO: Add an assert that interrupts are disabled, otherwise
     * memory allocation from interrupts can deadlock.
     */
 
    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }
 
    spinlock_unlock(&slab_cache_lock);
 
    return frames;
}
 
 
/* Print the list of slab caches */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;
    ipl_t ipl;
    
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}
 
-#ifdef CONFIG_DEBUG
-static int _slab_initialized = 0;
-#endif
-
void slab_cache_init(void)
{
    int i, size;
 
    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
               "slab_cache",
               sizeof(slab_cache_cache),
               sizeof(__address),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                          sizeof(slab_t),
                          0, NULL, NULL,
                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);
 
    /* Initialize structures for malloc */
    for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                             size, 0,
                             NULL,NULL, SLAB_CACHE_MAGDEFERRED);
    }
#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
#endif
}
 
/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. Allocate the per-CPU cache storage and enable it on all
 * existing caches that are SLAB_CACHE_MAGDEFERRED
 */
void slab_enable_cpucache(void)
{
    link_t *cur;
    slab_cache_t *s;
 
-    cpu_cache = slab_cache_create("magcpucache",
-                      sizeof(slab_mag_cache_t) * config.cpu_count,
-                      0, NULL, NULL,
-                      SLAB_CACHE_NOMAGAZINE);
+#ifdef CONFIG_DEBUG
+    _slab_initialized = 2;
+#endif
+
    spinlock_lock(&slab_cache_lock);
    
    for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
        s = list_get_instance(cur, slab_cache_t, link);
        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
            continue;
        make_magcache(s);
        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
    }
 
    spinlock_unlock(&slab_cache_lock);
}
 
/**************************************/
/* kalloc/kfree functions             */
void * kalloc(unsigned int size, int flags)
{
    int idx;
 
    ASSERT(_slab_initialized);
    ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W));
    
    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);
 
    idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;
 
    return slab_alloc(malloc_caches[idx], flags);
}
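
/*
 * The index computation in kalloc(), worked through (illustrative, with
 * SLAB_MIN_MALLOC_W == 4 so that malloc_caches[0] is the malloc-16 cache
 * above): a request for 100 bytes gives
 *
 *    fnzb(99) = 6,  idx = 6 - 4 + 1 = 3
 *
 * and malloc_caches[3] serves 16 << 3 == 128-byte objects - the smallest
 * power of two that fits the request. An exact power of two maps onto its
 * own cache: for size 128, fnzb(127) = 6 selects the same index.
 */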
 
 
void kfree(void *obj)
{
    slab_t *slab;
 
    if (!obj) return;
 
    slab = obj2slab(obj);
    _slab_free(slab->cache, obj, slab);
}
 