/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * It differs in the following ways:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept on a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held on a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, we first check whether it is
 * available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if this fails,
 * the object is deallocated into the slab). If the magazine is full, it is
 * put onto the cpu-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating one item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked so that it does not use magazines. This is used
 * only for slab-related caches to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself for allocating all its control structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries a 'light reclaim' first, then a brutal reclaim. The light reclaim
 * releases slabs from the cpu-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * @todo
 * For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-cpu-cached magazine cache to provide one. It might be feasible
 * to add a cpu-cached magazine cache (which would allocate its magazines
 * from the non-cpu-cached magazine cache). This would provide a nice per-cpu
 * buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which reduces contention on the single per-system
 * magazine cache.
 *
 * @todo
 * It might be good to add lock granularity even at the slab level;
 * we could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 */
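
/*
 * A minimal usage sketch of the public interface defined in this file (this
 * block is illustrative and not part of the original source; foo_t, foo_cache
 * and the two functions are hypothetical, only the slab_* calls are real):
 *
 *	static slab_cache_t *foo_cache;
 *
 *	void foo_init(void)
 *	{
 *		// no constructor/destructor, default alignment, default flags
 *		foo_cache = slab_cache_create("foo_t", sizeof(foo_t), 0,
 *		    NULL, NULL, 0);
 *	}
 *
 *	void foo_work(void)
 *	{
 *		// with no special flags, slab_alloc() always returns memory
 *		foo_t *foo = slab_alloc(foo_cache, 0);
 *		// ... use the object ...
 *		// the object returns to a CPU-bound magazine when possible
 *		slab_free(foo_cache, foo);
 *	}
 */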

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
#include <macros.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-cpu cache, so do not make it static
 * - using the slab allocator for internal slab structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
	"malloc-16",
	"malloc-32",
	"malloc-64",
	"malloc-128",
	"malloc-256",
	"malloc-512",
	"malloc-1K",
	"malloc-2K",
	"malloc-4K",
	"malloc-8K",
	"malloc-16K",
	"malloc-32K",
	"malloc-64K",
	"malloc-128K",
	"malloc-256K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache. */
	link_t link;		/**< List of full/partial slabs. */
	void *start;		/**< Start address of first available item. */
	count_t available;	/**< Count of available items in this slab. */
	index_t nextavail;	/**< The index of next available item. */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions */

/**
 * Allocate frames for slab space and initialize the slab descriptor
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	unsigned int i;
	unsigned int zone = 0;

	data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
	if (!data) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(KA2PA(data));
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
		frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	/* Build the in-object free list: object i stores the index of
	 * object i + 1, so nextavail chains through all free objects. */
	for (i = 0; i < cache->objects; i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(KA2PA(slab->start));
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* Slab functions */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known to the caller; NULL otherwise
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the slab control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1-level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;

	if (!slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Find a full magazine in the cache, take it from the list
 * and return it
 *
 * @param first If true, return the first magazine on the list, otherwise the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
					    int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	unsigned int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assumes the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Ensure that the current magazine has room for another object; return a
 * pointer to it, or NULL if no suitable magazine is available and a new one
 * cannot be allocated
 *
 * Assumes mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to the processor.
 * First try the current one.
 * If it is full, try the last one.
 * If that is full too, put it on the magazines list,
 * allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit into one slab of the given cache */
static unsigned int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static unsigned int badness(slab_cache_t *cache)
{
	unsigned int objects;
	unsigned int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects * cache->size;
}
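
/*
 * Worked example (illustrative, not part of the original source; assumes a
 * 4 KiB PAGE_SIZE): for a cache of 192-byte objects at order 0 with an
 * external slab descriptor, comp_objects() yields 4096 / 192 = 21 objects
 * and badness() yields 4096 - 21 * 192 = 64 wasted bytes. With
 * SLAB_CACHE_SLINSIDE, sizeof(slab_t) is subtracted from the usable space
 * before the same computation.
 */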

/**
 * Initialize mag_cache structure in slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
	unsigned int i;

	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 0);
	for (i = 0; i < config.cpu_count; i++) {
		memsetb(&cache->mag_cache[i], sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
	}
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   int (*destructor)(void *obj),
		   int flags)
{
	int pages;
	ipl_t ipl;

	memsetb(cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(unative_t))
		align = sizeof(unative_t);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = SIZE2FRAMES(cache->size);
	/* We need the 2^order >= pages */
	if (pages == 1)
		cache->order = 0;
	else
		cache->order = fnzb(pages - 1) + 1;

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 int (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	unsigned int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to the original magazine count to avoid
	 * an endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Free cpu-bound magazines */
		/* Destroy CPU magazines */
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove cache from link, so that we don't need
	 * to disable interrupts later
	 */

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs) ||
	    !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		result = magazine_obj_get(cache);
	}
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
	    magazine_obj_put(cache, obj)) {

		slab_obj_destroy(cache, obj, slab);

	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add an assert that interrupts are disabled, otherwise
	 * memory allocation from interrupts can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("slab name        size     pages  obj/pg slabs  cached allocated ctl\n");
	printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);

		printf("%-16s %8" PRIs " %6d %6u %6ld %6ld %9ld %-3s\n",
		    cache->name, cache->size, (1 << cache->order), cache->objects,
		    atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs),
		    atomic_get(&cache->allocated_objs),
		    cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void*),
			   sizeof(uintptr_t),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache),
			   sizeof(uintptr_t),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. It allocates the slab for the cpu cache and enables it on
 * all existing caches that are SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
	link_t *cur;
	slab_cache_t *s;

#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		s = list_get_instance(cur, slab_cache_t, link);
		if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
			continue;
		make_magcache(s);
		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* kalloc/kfree functions */
void * malloc(unsigned int size, int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}
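
/*
 * Size-class selection, worked example (illustrative, not part of the
 * original source; assumes SLAB_MIN_MALLOC_W == 4, which is what the
 * "malloc-16" cache above suggests): a request for 100 bytes gives
 * fnzb(100 - 1) == 6, so idx == 6 - 4 + 1 == 3, which selects the
 * "malloc-128" cache (16 << 3 == 128 bytes per object).
 */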

void * realloc(void *ptr, unsigned int size, int flags)
{
	ASSERT(_slab_initialized);
	ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));

	void *new_ptr;

	if (size > 0) {
		if (size < (1 << SLAB_MIN_MALLOC_W))
			size = (1 << SLAB_MIN_MALLOC_W);
		int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

		new_ptr = slab_alloc(malloc_caches[idx], flags);
	} else
		new_ptr = NULL;

	if ((new_ptr != NULL) && (ptr != NULL)) {
		slab_t *slab = obj2slab(ptr);
		memcpy(new_ptr, ptr, min(size, slab->cache->size));
	}

	if (ptr != NULL)
		free(ptr);

	return new_ptr;
}

void free(void *ptr)
{
	if (!ptr)
		return;

	slab_t *slab = obj2slab(ptr);
	_slab_free(slab->cache, ptr, slab);
}

/** @}
 */