/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab
 * allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy
 * to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first checked whether it is
 * available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if this fails,
 * the object is deallocated into the slab). If the magazine is full, it is
 * put onto the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating one item at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are immediately freed (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked so that it does not use magazines. This is used
 * only for slab-related caches, to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself for allocating all its control
 * structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * @todo
 * For better CPU scaling, the magazine allocation strategy should be
 * extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention on the single
 * per-system magazine cache.
 *
 * @todo
 * It might be good to add lock granularity even at the slab level; we
 * could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 */
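
/*
 * A bird's-eye sketch of the call paths implemented below (illustration
 * only, not compiled; see the individual functions for details):
 *
 *   slab_alloc(cache, flags)
 *       -> magazine_obj_get()      pop from the CPU-bound magazine pair
 *       -> slab_obj_create()       otherwise take from a partial slab,
 *           -> slab_space_alloc()  allocating a fresh slab if none exists
 *
 *   slab_free(cache, obj)
 *       -> magazine_obj_put()      push into the CPU-bound magazine pair
 *       -> slab_obj_destroy()      otherwise return to the owning slab
 */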

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
#include <macros.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using the slab allocator for internal slab structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
    "malloc-16",
    "malloc-32",
    "malloc-64",
    "malloc-128",
    "malloc-256",
    "malloc-512",
    "malloc-1K",
    "malloc-2K",
    "malloc-4K",
    "malloc-8K",
    "malloc-16K",
    "malloc-32K",
    "malloc-64K",
    "malloc-128K",
    "malloc-256K"
};

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache;    /**< Pointer to parent cache. */
    link_t link;            /**< List of full/partial slabs. */
    void *start;            /**< Start address of first available item. */
    count_t available;      /**< Count of available items in this slab. */
    index_t nextavail;      /**< The index of the next available item. */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions */

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t *slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    unsigned int i;
    unsigned int zone = 0;

    data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
    if (!data) {
        return NULL;
    }
    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free(KA2PA(data));
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

    for (i = 0; i < cache->objects; i++)
        *((int *) (slab->start + i * cache->size)) = i + 1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}
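
/*
 * Note on slab_space_alloc() (illustration): the initialization loop above
 * stores i + 1 into the first word of object i, so a fresh slab carries its
 * free list directly in the data area, as a chain of indices
 * 0 -> 1 -> ... -> objects - 1. slab_obj_create() pops from this chain via
 * slab->nextavail, and slab_obj_destroy() pushes freed indices back onto it.
 */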

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free(KA2PA(slab->start));
    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t *obj2slab(void *obj)
{
    return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* Slab functions */

/**
 * Return object to slab and call a destructor
 *
 * @param slab If the caller knows the slab of the object, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, slab_t *slab)
{
    int freed = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    if (cache->destructor)
        freed = cache->destructor(obj);

    spinlock_lock(&cache->slablock);
    ASSERT(slab->available < cache->objects);

    *((int *) obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start) / cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        spinlock_unlock(&cache->slablock);

        return freed + slab_space_free(cache, slab);
    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    spinlock_unlock(&cache->slablock);
    return freed;
}

/**
 * Take a new object from slab, or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void *slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the slab control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why we should get recursion at most 1-level deep
         */
        spinlock_unlock(&cache->slablock);
        slab = slab_space_alloc(cache, flags);
        if (!slab)
            return NULL;
        spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(cache->partial_slabs.next, slab_t,
            link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *) obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);

    if (cache->constructor && cache->constructor(obj, flags)) {
        /* Bad, bad, construction failed */
        slab_obj_destroy(cache, obj, slab);
        return NULL;
    }
    return obj;
}

/**************************************/
/* CPU-cache slab functions */

/**
 * Find a full magazine in the cache, take it off the list
 * and return it
 *
 * @param first If true, return the first magazine, otherwise the last
 */
static slab_magazine_t *get_mag_from_cache(slab_cache_t *cache, int first)
{
    slab_magazine_t *mag = NULL;
    link_t *cur;

    spinlock_lock(&cache->maglock);
    if (!list_empty(&cache->magazines)) {
        if (first)
            cur = cache->magazines.next;
        else
            cur = cache->magazines.prev;
        mag = list_get_instance(cur, slab_magazine_t, link);
        list_remove(&mag->link);
        atomic_dec(&cache->magazine_counter);
    }
    spinlock_unlock(&cache->maglock);
    return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
    spinlock_lock(&cache->maglock);

    list_prepend(&mag->link, &cache->magazines);
    atomic_inc(&cache->magazine_counter);

    spinlock_unlock(&cache->maglock);
}
366 | /** |
365 | /** |
367 | * Free all objects in magazine and free memory associated with magazine |
366 | * Free all objects in magazine and free memory associated with magazine |
368 | * |
367 | * |
369 | * @return Number of freed pages |
368 | * @return Number of freed pages |
370 | */ |
369 | */ |
371 | static count_t magazine_destroy(slab_cache_t *cache, |
370 | static count_t magazine_destroy(slab_cache_t *cache, slab_magazine_t *mag) |
372 | slab_magazine_t *mag) |
- | |
373 | { |
371 | { |
374 | unsigned int i; |
372 | unsigned int i; |
375 | count_t frames = 0; |
373 | count_t frames = 0; |
376 | 374 | ||
377 | for (i = 0; i < mag->busy; i++) { |
375 | for (i = 0; i < mag->busy; i++) { |
378 | frames += slab_obj_destroy(cache, mag->objs[i], NULL); |
376 | frames += slab_obj_destroy(cache, mag->objs[i], NULL); |
379 | atomic_dec(&cache->cached_objs); |
377 | atomic_dec(&cache->cached_objs); |
380 | } |
378 | } |
381 | 379 | ||
382 | slab_free(&mag_cache, mag); |
380 | slab_free(&mag_cache, mag); |
383 | 381 | ||
384 | return frames; |
382 | return frames; |
385 | } |
383 | } |

/**
 * Find a full magazine, set it as current and return it
 *
 * Assumes the cpu_magazine lock is held
 */
static slab_magazine_t *get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from the magazine list */
    newmag = get_mag_from_cache(cache, 1);
    if (!newmag)
        return NULL;

    if (lastmag)
        magazine_destroy(cache, lastmag);

    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object, or NULL if not available
 */
static void *magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    if (!CPU)
        return NULL;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Assure that the current magazine is empty, return a pointer to it,
 * or NULL if no empty magazine is available and one cannot be allocated
 *
 * Assumes mag_cache[CPU->id].lock is held
 *
 * We have two magazines bound to the processor.
 * First try the current.
 * If it is full, try the last.
 * If that is full too, put it on the magazines list,
 * allocate a new one and exchange last & current.
 */
static slab_magazine_t *make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current | last are full | nonexistent, allocate new */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to magazine list */
    if (lastmag)
        put_mag_to_cache(cache, lastmag);

    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 on success, -1 if memory could not be obtained
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    if (!CPU)
        return -1;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}
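
/*
 * Why a pair of magazines? An illustrative trace, assuming SLAB_MAG_SIZE
 * is 8: with current full (8/8) and last empty (0/8), magazine_obj_put()
 * swaps the two and stores into the former 'last' (now 1/8); the next
 * magazine_obj_get() finds that object in the current magazine and pops
 * it straight back (LIFO). Repeated alloc/free right at the magazine-size
 * boundary therefore stays within the two local magazines instead of
 * shipping a magazine to and from the global list on every operation -
 * exactly the thrashing the pair is meant to avoid.
 */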

/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit into a slab of the given cache */
static unsigned int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) /
            cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static unsigned int badness(slab_cache_t *cache)
{
    unsigned int objects;
    unsigned int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects * cache->size;
}
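
/*
 * Worked example (figures are illustrative; sizeof(slab_t) is assumed to
 * be 32 here and PAGE_SIZE 4096): for an order-0 SLINSIDE cache with
 * 192-byte objects,
 *
 *   comp_objects: (4096 - 32) / 192 = 21 objects per slab
 *   badness:      (4096 - 32) - 21 * 192 = 32 wasted bytes
 *
 * _slab_cache_create() below keeps increasing cache->order until badness()
 * drops to SLAB_MAX_BADNESS(cache) or less.
 */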

/**
 * Initialize the mag_cache structure in a slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
    unsigned int i;

    ASSERT(_slab_initialized >= 2);

    cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count,
        0);
    for (i = 0; i < config.cpu_count; i++) {
        memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
        spinlock_initialize(&cache->mag_cache[i].lock,
            "slab_maglock_cpu");
    }
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache, char *name, size_t size, size_t align,
    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
    int flags)
{
    int pages;
    ipl_t ipl;

    memsetb(cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(unative_t))
        align = sizeof(unative_t);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->slablock, "slab_lock");
    spinlock_initialize(&cache->maglock, "slab_maglock");
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        make_magcache(cache);

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    pages = SIZE2FRAMES(cache->size);
    /* We need 2^order >= pages */
    if (pages == 1)
        cache->order = 0;
    else
        cache->order = fnzb(pages - 1) + 1;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If the info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Add cache to cache list */
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t *
slab_cache_create(char *name, size_t size, size_t align,
    int (*constructor)(void *obj, int kmflag), int (*destructor)(void *obj),
    int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
        flags);
    return cache;
}
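
/*
 * Minimal usage sketch (illustrative only; myobj_t is hypothetical and the
 * constructor/destructor hooks are left NULL):
 *
 *   static slab_cache_t *myobj_cache;
 *
 *   myobj_cache = slab_cache_create("myobj", sizeof(myobj_t), 0,
 *       NULL, NULL, 0);
 *   myobj_t *obj = slab_alloc(myobj_cache, 0);
 *   ...
 *   slab_free(myobj_cache, obj);
 *   slab_cache_destroy(myobj_cache);  // every object must be freed first
 */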

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    unsigned int i;
    slab_magazine_t *mag;
    count_t frames = 0;
    int magcount;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* We count up to the original magazine count to avoid
     * an endless loop
     */
    magcount = atomic_get(&cache->magazine_counter);
    while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
        frames += magazine_destroy(cache, mag);
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    if (flags & SLAB_RECLAIM_ALL) {
        /* Free CPU-bound magazines */
        /* Destroy CPU magazines */
        for (i = 0; i < config.cpu_count; i++) {
            spinlock_lock(&cache->mag_cache[i].lock);

            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;

            spinlock_unlock(&cache->mag_cache[i].lock);
        }
    }

    return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    ipl_t ipl;

    /* First remove cache from link, so that we don't need
     * to disable interrupts later
     */

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_remove(&cache->link);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);

    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs) ||
        !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        free(cache->mag_cache);
    slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns memory */
void *slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        result = magazine_obj_get(cache);
    }
    if (!result)
        result = slab_obj_create(cache, flags);

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
        magazine_obj_put(cache, obj)) {
        slab_obj_destroy(cache, obj, slab);
    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    /* TODO: Add an assert that interrupts are disabled; otherwise
     * memory allocation from interrupt context can deadlock.
     */

    for (cur = slab_cache_list.next; cur != &slab_cache_list;
        cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
    printf("slab name        size     pages  obj/pg slabs  cached allocated"
        " ctl\n");
    printf("---------------- -------- ------ ------ ------ ------ ---------"
        " ---\n");

    for (cur = slab_cache_list.next; cur != &slab_cache_list;
        cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);

        printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
            cache->name, cache->size, (1 << cache->order),
            cache->objects, atomic_get(&cache->allocated_slabs),
            atomic_get(&cache->cached_objs),
            atomic_get(&cache->allocated_objs),
            cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
    }
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache, "slab_magazine",
        sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
        sizeof(uintptr_t), NULL, NULL, SLAB_CACHE_NOMAGAZINE |
        SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache, "slab_cache",
        sizeof(slab_cache_cache), sizeof(uintptr_t), NULL, NULL,
        SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern", sizeof(slab_t), 0,
        NULL, NULL, SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

    /* Initialize structures for malloc */
    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
        i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
        i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i], size, 0,
            NULL, NULL, SLAB_CACHE_MAGDEFERRED);
    }
#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. Allocate the slab for the cpucache and enable it on all
 * existing slab caches that are SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
    link_t *cur;
    slab_cache_t *s;

#ifdef CONFIG_DEBUG
    _slab_initialized = 2;
#endif

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list;
        cur = cur->next) {
        s = list_get_instance(cur, slab_cache_t, link);
        if ((s->flags & SLAB_CACHE_MAGDEFERRED) !=
            SLAB_CACHE_MAGDEFERRED)
            continue;
        make_magcache(s);
        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
    }

    spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* kalloc/kfree functions */
void *malloc(unsigned int size, int flags)
{
    ASSERT(_slab_initialized);
    ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}

904 | 897 | ||
905 | void * realloc(void *ptr, unsigned int size, int flags) |
898 | void *realloc(void *ptr, unsigned int size, int flags) |
906 | { |
899 | { |
907 | ASSERT(_slab_initialized); |
900 | ASSERT(_slab_initialized); |
908 | ASSERT(size <= (1 << SLAB_MAX_MALLOC_W)); |
901 | ASSERT(size <= (1 << SLAB_MAX_MALLOC_W)); |
909 | 902 | ||
910 | void *new_ptr; |
903 | void *new_ptr; |
911 | 904 | ||
912 | if (size > 0) { |
905 | if (size > 0) { |
913 | if (size < (1 << SLAB_MIN_MALLOC_W)) |
906 | if (size < (1 << SLAB_MIN_MALLOC_W)) |
914 | size = (1 << SLAB_MIN_MALLOC_W); |
907 | size = (1 << SLAB_MIN_MALLOC_W); |
915 | int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1; |
908 | int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1; |
916 | 909 | ||
917 | new_ptr = slab_alloc(malloc_caches[idx], flags); |
910 | new_ptr = slab_alloc(malloc_caches[idx], flags); |
918 | } else |
911 | } else |
919 | new_ptr = NULL; |
912 | new_ptr = NULL; |
920 | 913 | ||
921 | if ((new_ptr != NULL) && (ptr != NULL)) { |
914 | if ((new_ptr != NULL) && (ptr != NULL)) { |
922 | slab_t *slab = obj2slab(ptr); |
915 | slab_t *slab = obj2slab(ptr); |
923 | memcpy(new_ptr, ptr, min(size, slab->cache->size)); |
916 | memcpy(new_ptr, ptr, min(size, slab->cache->size)); |
924 | } |
917 | } |
925 | 918 | ||
926 | if (ptr != NULL) |
919 | if (ptr != NULL) |
927 | free(ptr); |
920 | free(ptr); |
928 | 921 | ||
929 | return new_ptr; |
922 | return new_ptr; |
930 | } |
923 | } |

void free(void *ptr)
{
    if (!ptr)
        return;

    slab_t *slab = obj2slab(ptr);
    _slab_free(slab->cache, ptr, slab);
}

/** @}
 */