/*
 * Copyright (c) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab
 * allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy
 * to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, we first check whether it is
 * available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into a CPU-bound
 * magazine. If there is no such magazine, a new one is allocated (if this
 * fails, the object is deallocated into the slab). If the magazine is
 * full, it is put into the CPU-shared list of magazines and a new one is
 * allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating one item at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided thanks
 * to the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked so that it does not use magazines. This is used
 * only for slab-related caches, to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself for allocating all its control
 * structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * @todo
 * For better CPU scaling, the magazine allocation strategy should be
 * extended. Currently, if a cache does not have a magazine, it asks the
 * non-CPU-cached magazine cache to provide one. It might be feasible to
 * add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention for the one
 * per-system magazine cache.
 *
 * @todo
 * It might be good to add lock granularity even at the slab level; we
 * could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 */
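
/*
 * A minimal usage sketch of the public API implemented below (purely
 * illustrative; my_object_t and the zero flag/alignment arguments are
 * hypothetical placeholders, not part of this file):
 *
 *	slab_cache_t *cache = slab_cache_create("my_objects",
 *	    sizeof(my_object_t), 0, NULL, NULL, 0);
 *	my_object_t *obj = slab_alloc(cache, 0); // magazine or slab
 *	...
 *	slab_free(cache, obj);      // back to a CPU-bound magazine
 *	slab_cache_destroy(cache);  // all objects must be freed by now
 */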

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>
#include <macros.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static.
 * - using the slab allocator for internal slab structures will not
 *   deadlock, as all slab structures are 'small' - the control structures
 *   of their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
    "malloc-16",
    "malloc-32",
    "malloc-64",
    "malloc-128",
    "malloc-256",
    "malloc-512",
    "malloc-1K",
    "malloc-2K",
    "malloc-4K",
    "malloc-8K",
    "malloc-16K",
    "malloc-32K",
    "malloc-64K",
    "malloc-128K",
    "malloc-256K"
};

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache;    /**< Pointer to parent cache. */
    link_t link;            /**< List of full/partial slabs. */
    void *start;            /**< Start address of first available item. */
    count_t available;      /**< Count of available items in this slab. */
    index_t nextavail;      /**< The index of next available item. */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions          */

/**
 * Allocate frames for slab space and initialize them
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    unsigned int zone = 0;

    data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
    if (!data) {
        return NULL;
    }
    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free(KA2PA(data));
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    for (i = 0; i < (1 << cache->order); i++)
        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

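    /*
     * Thread an embedded free list through the unused objects: each
     * free item holds the index of the free item that follows it, so
     * no extra memory is needed for bookkeeping.
     */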
    for (i = 0; i < cache->objects; i++)
        *((int *) (slab->start + i * cache->size)) = i + 1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Deallocate space associated with a slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free(KA2PA(slab->start));
    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* Slab functions                     */

/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known directly by the caller,
 *             otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                slab_t *slab)
{
    int freed = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    if (cache->destructor)
        freed = cache->destructor(obj);

    spinlock_lock(&cache->slablock);
    ASSERT(slab->available < cache->objects);

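    /*
     * Push the object onto the slab's embedded free list: the old head
     * index is stored in the object itself, and the object's own index
     * becomes the new head.
     */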
    *((int *) obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start) / cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        spinlock_unlock(&cache->slablock);

        return freed + slab_space_free(cache, slab);
    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    spinlock_unlock(&cache->slablock);
    return freed;
}

/**
 * Take a new object from a slab, creating a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the slab control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why we should get recursion at most 1 level deep
         */
        spinlock_unlock(&cache->slablock);
        slab = slab_space_alloc(cache, flags);
        if (!slab)
            return NULL;
        spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(cache->partial_slabs.next, slab_t, link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *) obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);

    if (cache->constructor && cache->constructor(obj, flags)) {
        /* Bad, bad, construction failed */
        slab_obj_destroy(cache, obj, slab);
        return NULL;
    }
    return obj;
}

/**************************************/
/* CPU-Cache slab functions           */

/**
 * Find a full magazine in the cache, take it from the list
 * and return it
 *
 * @param first If true, return the first magazine, else the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
                        int first)
{
    slab_magazine_t *mag = NULL;
    link_t *cur;

    spinlock_lock(&cache->maglock);
    if (!list_empty(&cache->magazines)) {
        if (first)
            cur = cache->magazines.next;
        else
            cur = cache->magazines.prev;
        mag = list_get_instance(cur, slab_magazine_t, link);
        list_remove(&mag->link);
        atomic_dec(&cache->magazine_counter);
    }
    spinlock_unlock(&cache->maglock);
    return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
    spinlock_lock(&cache->maglock);

    list_prepend(&mag->link, &cache->magazines);
    atomic_inc(&cache->magazine_counter);

    spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in the magazine and free the memory associated with
 * the magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i = 0; i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assume the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from the magazine list */
    newmag = get_mag_from_cache(cache, 1);
    if (!newmag)
        return NULL;

    if (lastmag)
        magazine_destroy(cache, lastmag);

    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    if (!CPU)
        return NULL;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Assure that the current magazine is not full; return a pointer to it,
 * or NULL if no magazine with empty slots is available and one cannot
 * be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have two magazines bound to the processor.
 * First try the current.
 * If it is full, try the last.
 * If that is full too, put it on the magazine list,
 * allocate a new one and exchange last & current.
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current | last are full | nonexistent, allocate new */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to the magazine list */
    if (lastmag)
        put_mag_to_cache(cache, lastmag);

    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}
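
/*
 * Note: on success the magazine returned above is guaranteed to have at
 * least one free slot, which magazine_obj_put() below relies on.
 */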

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    if (!CPU)
        return -1;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}


/**************************************/
/* Slab cache functions               */

/** Return number of objects that fit into a slab of the cache's size */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in a slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects * cache->size;
}
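
/*
 * A worked example: with 4 KiB pages, order 0 and 96-byte objects kept
 * in an externally described slab, comp_objects() yields 4096 / 96 = 42
 * and badness() yields 4096 - 42 * 96 = 64 wasted bytes per slab.
 */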

/**
 * Initialize the mag_cache structure in a slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
    int i;

    ASSERT(_slab_initialized >= 2);

    cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 0);
    for (i = 0; i < config.cpu_count; i++) {
        memsetb((uintptr_t) &cache->mag_cache[i],
            sizeof(cache->mag_cache[i]), 0);
        spinlock_initialize(&cache->mag_cache[i].lock,
                    "slab_maglock_cpu");
    }
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
           char *name,
           size_t size,
           size_t align,
           int (*constructor)(void *obj, int kmflag),
           int (*destructor)(void *obj),
           int flags)
{
    int pages;
    ipl_t ipl;

    memsetb((uintptr_t) cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(unative_t))
        align = sizeof(unative_t);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->slablock, "slab_lock");
    spinlock_initialize(&cache->maglock, "slab_maglock");
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        make_magcache(cache);

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    pages = SIZE2FRAMES(cache->size);
    /* We need 2^order >= pages */
    if (pages == 1)
        cache->order = 0;
    else
        cache->order = fnzb(pages - 1) + 1;
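    /*
     * fnzb(pages - 1) + 1 computes ceil(log2(pages)): e.g. for
     * pages == 5, fnzb(4) == 2, so the order is 3 and the slab
     * spans 8 frames.
     */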

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If the info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Add cache to cache list */
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                 size_t size,
                 size_t align,
                 int (*constructor)(void *obj, int kmflag),
                 int (*destructor)(void *obj),
                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
               flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    count_t frames = 0;
    int magcount;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* We count up to the original magazine count to avoid
     * an endless loop
     */
    magcount = atomic_get(&cache->magazine_counter);
    while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
        frames += magazine_destroy(cache, mag);
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    if (flags & SLAB_RECLAIM_ALL) {
        /* Free cpu-bound magazines */
        /* Destroy CPU magazines */
        for (i = 0; i < config.cpu_count; i++) {
            spinlock_lock(&cache->mag_cache[i].lock);

            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;

            spinlock_unlock(&cache->mag_cache[i].lock);
        }
    }

    return frames;
}

/** Check that there are no slabs and remove cache from the system */
void slab_cache_destroy(slab_cache_t *cache)
{
    ipl_t ipl;

    /* First remove the cache from the list, so that we don't need
     * to disable interrupts later
     */

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_remove(&cache->link);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);

    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs) ||
        !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        free(cache->mag_cache);
    slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags are given, always
 * returns memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        result = magazine_obj_get(cache);
    }
    if (!result)
        result = slab_obj_create(cache, flags);

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
        magazine_obj_put(cache, obj)) {
        slab_obj_destroy(cache, obj, slab);
    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    /* TODO: Add an assert that interrupts are disabled, otherwise
     * memory allocation from interrupts can deadlock.
     */

    for (cur = slab_cache_list.next; cur != &slab_cache_list;
         cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
    printf("slab name            size  pages obj/pg  slabs cached allocated ctl\n");
    printf("---------------- -------- ------ ------ ------ ------ --------- ---\n");

    for (cur = slab_cache_list.next; cur != &slab_cache_list;
         cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);

        printf("%-16s %8zd %6zd %6zd %6zd %6zd %9zd %-3s\n", cache->name,
            cache->size, (1 << cache->order), cache->objects,
            atomic_get(&cache->allocated_slabs),
            atomic_get(&cache->cached_objs),
            atomic_get(&cache->allocated_objs),
            cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out");
    }
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
               "slab_magazine",
               sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
               sizeof(uintptr_t),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
               "slab_cache",
               sizeof(slab_cache_cache),
               sizeof(uintptr_t),
               NULL, NULL,
               SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                          sizeof(slab_t),
                          0, NULL, NULL,
                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

    /* Initialize structures for malloc */
    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                             size, 0,
                             NULL, NULL, SLAB_CACHE_MAGDEFERRED);
    }
#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors.
 * Allocate a slab for the cpucache and enable it on all existing
 * slab caches that are SLAB_CACHE_MAGDEFERRED
 */
void slab_enable_cpucache(void)
{
    link_t *cur;
    slab_cache_t *s;

#ifdef CONFIG_DEBUG
    _slab_initialized = 2;
#endif

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list;
         cur = cur->next) {
        s = list_get_instance(cur, slab_cache_t, link);
        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
            continue;
        make_magcache(s);
        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
    }

    spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* kalloc/kfree functions             */
void * malloc(unsigned int size, int flags)
{
    ASSERT(_slab_initialized);
    ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
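    /*
     * The index selects the smallest power-of-two cache that fits the
     * request. E.g., assuming SLAB_MIN_MALLOC_W == 4 (as the malloc-16
     * first cache above suggests), size == 100 gives fnzb(99) == 6, so
     * idx == 3 and the request is served from the malloc-128 cache.
     */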

    return slab_alloc(malloc_caches[idx], flags);
}

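/** Reallocate by allocating a new object from the matching malloc cache
 * and copying over min(new size, old object size) bytes; the old object
 * is always freed.
 */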
void * realloc(void *ptr, unsigned int size, int flags)
{
    ASSERT(_slab_initialized);
    ASSERT(size <= (1 << SLAB_MAX_MALLOC_W));

    void *new_ptr;

    if (size > 0) {
        if (size < (1 << SLAB_MIN_MALLOC_W))
            size = (1 << SLAB_MIN_MALLOC_W);
        int idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

        new_ptr = slab_alloc(malloc_caches[idx], flags);
    } else
        new_ptr = NULL;

    if ((new_ptr != NULL) && (ptr != NULL)) {
        slab_t *slab = obj2slab(ptr);
        memcpy(new_ptr, ptr, min(size, slab->cache->size));
    }

    if (ptr != NULL)
        free(ptr);

    return new_ptr;
}

void free(void *ptr)
{
    if (!ptr)
        return;

    slab_t *slab = obj2slab(ptr);
    _slab_free(slab->cache, ptr, slab);
}

/** @}
 */