| Rev | Author | Line No. | Line |
|---|---|---|---|
| 759 | palkovsky | 1 | /* |
| 2071 | jermar | 2 | * Copyright (c) 2006 Ondrej Palkovsky |
| 759 | palkovsky | 3 | * All rights reserved. |
| | | 4 | * |
| | | 5 | * Redistribution and use in source and binary forms, with or without |
| | | 6 | * modification, are permitted provided that the following conditions |
| | | 7 | * are met: |
| | | 8 | * |
| | | 9 | * - Redistributions of source code must retain the above copyright |
| | | 10 | * notice, this list of conditions and the following disclaimer. |
| | | 11 | * - Redistributions in binary form must reproduce the above copyright |
| | | 12 | * notice, this list of conditions and the following disclaimer in the |
| | | 13 | * documentation and/or other materials provided with the distribution. |
| | | 14 | * - The name of the author may not be used to endorse or promote products |
| | | 15 | * derived from this software without specific prior written permission. |
| | | 16 | * |
| | | 17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
| | | 18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
| | | 19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
| | | 20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
| | | 21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
| | | 22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| | | 23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| | | 24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| | | 25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
| | | 26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| | | 27 | */ |
| | | 28 | |
| 1757 | jermar | 29 | /** @addtogroup genericmm |
| 1702 | cejka | 30 | * @{ |
| | | 31 | */ |
| | | 32 | |
| 1248 | jermar | 33 | /** |
| 1702 | cejka | 34 | * @file |
| 1248 | jermar | 35 | * @brief Slab allocator. |
| 769 | palkovsky | 36 | * |
| 1248 | jermar | 37 | * The slab allocator is closely modelled after the OpenSolaris slab allocator |
| | | 38 | * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/ |
| | | 39 | * |
| 769 | palkovsky | 40 | * with the following exceptions: |
| 1248 | jermar | 41 | * @li empty slabs are deallocated immediately |
| 769 | palkovsky | 42 | * (in Linux they are kept in a linked list, in Solaris ???) |
| 1248 | jermar | 43 | * @li empty magazines are deallocated when not needed |
| 769 | palkovsky | 44 | * (in Solaris they are held in a linked list in the slab cache) |
| | | 45 | * |
| 1248 | jermar | 46 | * The following features are not currently supported, but would be easy to add: |
| | | 47 | * @li cache coloring |
| | | 48 | * @li dynamic magazine growing (different magazine sizes are already |
| 1144 | jermar | 49 | * supported, but we would need to adjust the allocation strategy) |
| 769 | palkovsky | 50 | * |
| 1248 | jermar | 51 | * The slab allocator supports per-CPU caches ('magazines') to facilitate |
| 769 | palkovsky | 52 | * good SMP scaling. |
| | | 53 | * |
| | | 54 | * When a new object is being allocated, the allocator first checks whether |
| 1554 | jermar | 55 | * it is available in a CPU-bound magazine. If it is not found there, it is |
| | | 56 | * allocated from a CPU-shared slab - if a partially full one is found, |
| | | 57 | * it is used, otherwise a new one is allocated. |
| 769 | palkovsky | 58 | * |
| 1554 | jermar | 59 | * When an object is being deallocated, it is put into a CPU-bound magazine. |
| | | 60 | * If there is no such magazine, a new one is allocated (if this fails, |
| 1248 | jermar | 61 | * the object is deallocated into its slab). If the magazine is full, it is |
| 1554 | jermar | 62 | * put into the CPU-shared list of magazines and a new one is allocated. |
| 769 | palkovsky | 63 | * |
| 1554 | jermar | 64 | * The CPU-bound magazine is actually a pair of magazines in order to avoid |
| 769 | palkovsky | 65 | * thrashing when somebody is allocating/deallocating 1 item at the magazine |
| | | 66 | * size boundary. LIFO order is enforced, which should avoid fragmentation |
| | | 67 | * as much as possible. |
| | | 68 | * |
| 1554 | jermar | 69 | * Every cache contains a list of full slabs and a list of partially full slabs. |
| 1248 | jermar | 70 | * Empty slabs are immediately freed (thrashing will be avoided because |
| 769 | palkovsky | 71 | * of magazines). |
| | | 72 | * |
| 1248 | jermar | 73 | * The slab information structure is kept inside the data area, if possible. |
| 769 | palkovsky | 74 | * A cache can be marked so that it does not use magazines. This is used |
| 1248 | jermar | 75 | * only for slab-related caches to avoid deadlocks and infinite recursion |
| | | 76 | * (the slab allocator uses itself for allocating all its control structures). |
| 769 | palkovsky | 77 | * |
| 1554 | jermar | 78 | * The slab allocator allocates a lot of space and does not free it. When |
| | | 79 | * the frame allocator fails to allocate a frame, it calls slab_reclaim(). |
| 769 | palkovsky | 80 | * It tries 'light reclaim' first, then brutal reclaim. The light reclaim |
| | | 81 | * releases slabs from the CPU-shared magazine list, until at least 1 slab |
| | | 82 | * is deallocated in each cache (this algorithm should probably change). |
| | | 83 | * The brutal reclaim removes all cached objects, even from CPU-bound |
| | | 84 | * magazines. |
| | | 85 | * |
| 1757 | jermar | 86 | * @todo |
| 1248 | jermar | 87 | * For better CPU-scaling the magazine allocation strategy should |
| 775 | palkovsky | 88 | * be extended. Currently, if a cache does not have a magazine, it asks |
| | | 89 | * the non-CPU-cached magazine cache to provide one. It might be feasible |
| | | 90 | * to add a CPU-cached magazine cache (which would allocate its magazines |
| | | 91 | * from the non-CPU-cached magazine cache). This would provide a nice per-CPU |
| | | 92 | * buffer. The other possibility is to use the per-cache |
| | | 93 | * 'empty-magazine-list', which reduces contention on the single per-system |
| | | 94 | * magazine cache. |
| | | 95 | * |
| 1757 | jermar | 96 | * @todo |
| | | 97 | * It might be good to add lock granularity even at the slab level; |
| | | 98 | * we could then try_spinlock over all partial slabs and thus improve |
| | | 99 | * scalability even at the slab level. |
| 769 | palkovsky | 100 | */ |
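
To make the interface concrete before the implementation, here is a minimal usage sketch of the public API this file provides (`slab_cache_create()`, `slab_alloc()`, `slab_free()`, `slab_cache_destroy()`). The `waypoint_t` type, its constructor, and `waypoint_demo()` are invented for illustration; the call signatures match the definitions later in this listing.

```c
#include <mm/slab.h>

typedef struct {
	int x, y;
} waypoint_t;

/* Optional constructor: run on objects handed out from a slab.
 * Returning nonzero signals construction failure. */
static int waypoint_ctor(void *obj, int kmflag)
{
	((waypoint_t *) obj)->x = 0;
	((waypoint_t *) obj)->y = 0;
	return 0;
}

static slab_cache_t *waypoint_cache;

void waypoint_demo(void)
{
	/* No destructor, no special flags; 0 = natural alignment. */
	waypoint_cache = slab_cache_create("waypoint", sizeof(waypoint_t), 0,
	    waypoint_ctor, NULL, 0);

	waypoint_t *w = slab_alloc(waypoint_cache, 0);
	if (w)
		slab_free(waypoint_cache, w);

	/* Only legal once all objects have been returned. */
	slab_cache_destroy(waypoint_cache);
}
```
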
| Rev | Author | Line No. | Line |
|---|---|---|---|
| | | 101 | |
| 759 | palkovsky | 102 | #include <synch/spinlock.h> |
| | | 103 | #include <mm/slab.h> |
| 788 | jermar | 104 | #include <adt/list.h> |
| 759 | palkovsky | 105 | #include <memstr.h> |
| | | 106 | #include <align.h> |
| 762 | palkovsky | 107 | #include <mm/frame.h> |
| 759 | palkovsky | 108 | #include <config.h> |
| | | 109 | #include <print.h> |
| | | 110 | #include <arch.h> |
| | | 111 | #include <panic.h> |
| 762 | palkovsky | 112 | #include <debug.h> |
| 771 | palkovsky | 113 | #include <bitops.h> |
| 759 | palkovsky | 114 | |
| | | 115 | SPINLOCK_INITIALIZE(slab_cache_lock); |
| 769 | palkovsky | 116 | static LIST_INITIALIZE(slab_cache_list); |
| 759 | palkovsky | 117 | |
| 769 | palkovsky | 118 | /** Magazine cache */ |
| | | 119 | static slab_cache_t mag_cache; |
| | | 120 | /** Cache for cache descriptors */ |
| | | 121 | static slab_cache_t slab_cache_cache; |
| | | 122 | /** Cache for external slab descriptors |
| | | 123 | * This time we want a per-CPU cache, so do not make it static |
| 1248 | jermar | 124 | * - using the slab for internal slab structures will not deadlock, |
| 769 | palkovsky | 125 | * as all slab structures are 'small' - the control structures of |
| | | 126 | * their caches do not require further allocation |
| | | 127 | */ |
| | | 128 | static slab_cache_t *slab_extern_cache; |
| 771 | palkovsky | 129 | /** Caches for malloc */ |
| | | 130 | static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1]; |
| | | 131 | char *malloc_names[] = { |
| 791 | palkovsky | 132 | "malloc-16","malloc-32","malloc-64","malloc-128", |
| 771 | palkovsky | 133 | "malloc-256","malloc-512","malloc-1K","malloc-2K", |
| | | 134 | "malloc-4K","malloc-8K","malloc-16K","malloc-32K", |
| 1428 | palkovsky | 135 | "malloc-64K","malloc-128K","malloc-256K" |
| 771 | palkovsky | 136 | }; |
| 762 | palkovsky | 137 | |
| 769 | palkovsky | 138 | /** Slab descriptor */ |
| 762 | palkovsky | 139 | typedef struct { |
| 1950 | jermar | 140 | slab_cache_t *cache; /**< Pointer to parent cache. */ |
| | | 141 | link_t link; /**< List of full/partial slabs. */ |
| | | 142 | void *start; /**< Start address of first available item. */ |
| | | 143 | count_t available; /**< Count of available items in this slab. */ |
| | | 144 | index_t nextavail; /**< The index of next available item. */ |
| 762 | palkovsky | 145 | } slab_t; |
| | | 146 | |
| 791 | palkovsky | 147 | #ifdef CONFIG_DEBUG |
| | | 148 | static int _slab_initialized = 0; |
| | | 149 | #endif |
| | | 150 | |
| 759 | palkovsky | 151 | /**************************************/ |
| 1248 | jermar | 152 | /* Slab allocation functions */ |
| 759 | palkovsky | 153 | |
| 762 | palkovsky | 154 | /** |
| | | 155 | * Allocate frames for slab space and initialize the slab |
| | | 156 | * |
| | | 157 | */ |
| | | 158 | static slab_t * slab_space_alloc(slab_cache_t *cache, int flags) |
| | | 159 | { |
| | | 160 | void *data; |
| | | 161 | slab_t *slab; |
| | | 162 | size_t fsize; |
| | | 163 | int i; |
| 2123 | decky | 164 | unsigned int zone = 0; |
| 814 | palkovsky | 165 | |
| 1766 | palkovsky | 166 | data = frame_alloc_generic(cache->order, FRAME_KA \| flags, &zone); |
| | | 167 | if (!data) { |
| 762 | palkovsky | 168 | return NULL; |
| 764 | palkovsky | 169 | } |
| 768 | palkovsky | 170 | if (! (cache->flags & SLAB_CACHE_SLINSIDE)) { |
| 769 | palkovsky | 171 | slab = slab_alloc(slab_extern_cache, flags); |
| 762 | palkovsky | 172 | if (!slab) { |
| 1760 | palkovsky | 173 | frame_free(KA2PA(data)); |
| 762 | palkovsky | 174 | return NULL; |
| | | 175 | } |
| | | 176 | } else { |
| | | 177 | fsize = (PAGE_SIZE << cache->order); |
| | | 178 | slab = data + fsize - sizeof(*slab); |
| | | 179 | } |
| 1288 | jermar | 180 | |
| 762 | palkovsky | 181 | /* Fill in slab structures */ |
| 814 | palkovsky | 182 | for (i=0; i < (1 << cache->order); i++) |
| 1760 | palkovsky | 183 | frame_set_parent(ADDR2PFN(KA2PA(data))+i, slab, zone); |
| 762 | palkovsky | 184 | |
| | | 185 | slab->start = data; |
| | | 186 | slab->available = cache->objects; |
| | | 187 | slab->nextavail = 0; |
| 767 | palkovsky | 188 | slab->cache = cache; |
| 762 | palkovsky | 189 | |
| | | 190 | for (i=0; i<cache->objects;i++) |
| | | 191 | *((int *) (slab->start + i*cache->size)) = i+1; |
| 764 | palkovsky | 192 | |
| | | 193 | atomic_inc(&cache->allocated_slabs); |
| 762 | palkovsky | 194 | return slab; |
| | | 195 | } |
| | | 196 | |
| 759 | palkovsky | 197 | /** |
| 1248 | jermar | 198 | * Deallocate space associated with slab |
| 762 | palkovsky | 199 | * |
| | | 200 | * @return number of freed frames |
| | | 201 | */ |
| | | 202 | static count_t slab_space_free(slab_cache_t *cache, slab_t *slab) |
| | | 203 | { |
| 1760 | palkovsky | 204 | frame_free(KA2PA(slab->start)); |
| 768 | palkovsky | 205 | if (! (cache->flags & SLAB_CACHE_SLINSIDE)) |
| 769 | palkovsky | 206 | slab_free(slab_extern_cache, slab); |
| 764 | palkovsky | 207 | |
| | | 208 | atomic_dec(&cache->allocated_slabs); |
| | | 209 | |
| 762 | palkovsky | 210 | return 1 << cache->order; |
| | | 211 | } |
| | | 212 | |
| | | 213 | /** Map object to slab structure */ |
| | | 214 | static slab_t * obj2slab(void *obj) |
| | | 215 | { |
| 814 | palkovsky | 216 | return (slab_t *)frame_get_parent(ADDR2PFN(KA2PA(obj)), 0); |
| 762 | palkovsky | 217 | } |
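
The object-to-slab mapping relies on the frame allocator's per-frame parent pointer: slab_space_alloc() registers the slab_t with every frame it covers, so obj2slab() is a constant-time table lookup. The following standalone sketch models the same idea in plain userland C with a mock frame table; the `fake_frame_*` names and sizes are illustrative, not the kernel's.

```c
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define NFRAMES   1024	/* mock physical memory: NFRAMES * PAGE_SIZE */

/* Mock of the frame allocator's per-frame parent pointer. */
static void *fake_frame_parent[NFRAMES];

static void fake_frame_set_parent(uintptr_t pfn, void *parent)
{
	fake_frame_parent[pfn] = parent;
}

static void *fake_frame_get_parent(uintptr_t pfn)
{
	return fake_frame_parent[pfn];
}

int main(void)
{
	static char memory[NFRAMES * PAGE_SIZE];	/* mock RAM */
	uintptr_t base = (uintptr_t) memory;

	/* A "slab" descriptor covering frames 4..7 (order 2 = 4 pages),
	 * as frame_set_parent() does in slab_space_alloc(). */
	int slab_descriptor = 42;
	for (uintptr_t pfn = 4; pfn < 8; pfn++)
		fake_frame_set_parent(pfn, &slab_descriptor);

	/* obj2slab(): the page-frame number of the object indexes the table. */
	void *obj = (void *) (base + 5 * PAGE_SIZE + 123);
	uintptr_t pfn = ((uintptr_t) obj - base) / PAGE_SIZE;
	assert(fake_frame_get_parent(pfn) == &slab_descriptor);
	printf("object %p belongs to slab %p\n", obj, (void *) &slab_descriptor);
	return 0;
}
```
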
| Rev | Author | Line No. | Line |
|---|---|---|---|
| | | 218 | |
| | | 219 | /**************************************/ |
| 1248 | jermar | 220 | /* Slab functions */ |
| 762 | palkovsky | 221 | |
| | | 222 | |
| | | 223 | /** |
| 759 | palkovsky | 224 | * Return object to slab and call a destructor |
| | | 225 | * |
| 762 | palkovsky | 226 | * @param slab The slab of the object, if known directly by the caller; otherwise NULL |
| | | 227 | * |
| 759 | palkovsky | 228 | * @return Number of freed pages |
| | | 229 | */ |
| 762 | palkovsky | 230 | static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, |
| | | 231 | slab_t *slab) |
| 759 | palkovsky | 232 | { |
| 787 | palkovsky | 233 | int freed = 0; |
| | | 234 | |
| 762 | palkovsky | 235 | if (!slab) |
| | | 236 | slab = obj2slab(obj); |
| | | 237 | |
| 767 | palkovsky | 238 | ASSERT(slab->cache == cache); |
| | | 239 | |
| 787 | palkovsky | 240 | if (cache->destructor) |
| | | 241 | freed = cache->destructor(obj); |
| | | 242 | |
| 776 | palkovsky | 243 | spinlock_lock(&cache->slablock); |
| 789 | palkovsky | 244 | ASSERT(slab->available < cache->objects); |
| 776 | palkovsky | 245 | |
| 762 | palkovsky | 246 | *((int *)obj) = slab->nextavail; |
| | | 247 | slab->nextavail = (obj - slab->start)/cache->size; |
| | | 248 | slab->available++; |
| | | 249 | |
| | | 250 | /* Move it to the correct list */ |
| | | 251 | if (slab->available == cache->objects) { |
| | | 252 | /* Free associated memory */ |
| | | 253 | list_remove(&slab->link); |
| 782 | palkovsky | 254 | spinlock_unlock(&cache->slablock); |
| | | 255 | |
| 787 | palkovsky | 256 | return freed + slab_space_free(cache, slab); |
| 782 | palkovsky | 257 | |
| 780 | palkovsky | 258 | } else if (slab->available == 1) { |
| | | 259 | /* It was in full, move it to partial */ |
| | | 260 | list_remove(&slab->link); |
| | | 261 | list_prepend(&slab->link, &cache->partial_slabs); |
| 762 | palkovsky | 262 | } |
| 783 | palkovsky | 263 | spinlock_unlock(&cache->slablock); |
| 787 | palkovsky | 264 | return freed; |
| 759 | palkovsky | 265 | } |
| | | 266 | |
| | | 267 | /** |
| | | 268 | * Take a new object from a slab, or create a new slab if needed |
| | | 269 | * |
| | | 270 | * @return Object address or NULL |
| | | 271 | */ |
| | | 272 | static void * slab_obj_create(slab_cache_t *cache, int flags) |
| | | 273 | { |
| 762 | palkovsky | 274 | slab_t *slab; |
| | | 275 | void *obj; |
| | | 276 | |
| 776 | palkovsky | 277 | spinlock_lock(&cache->slablock); |
| | | 278 | |
| 762 | palkovsky | 279 | if (list_empty(&cache->partial_slabs)) { |
| | | 280 | /* Allow recursion and reclaiming |
| 1248 | jermar | 281 | * - this should work, as the slab control structures |
| 1288 | jermar | 282 | * are small and do not need to allocate with anything |
| | | 283 | * other than frame_alloc when they are allocating; |
| 762 | palkovsky | 284 | * that's why we should get recursion at most 1 level deep |
| | | 285 | */ |
| 776 | palkovsky | 286 | spinlock_unlock(&cache->slablock); |
| 762 | palkovsky | 287 | slab = slab_space_alloc(cache, flags); |
| 780 | palkovsky | 288 | if (!slab) |
| | | 289 | return NULL; |
| 776 | palkovsky | 290 | spinlock_lock(&cache->slablock); |
| 762 | palkovsky | 291 | } else { |
| 1950 | jermar | 292 | slab = list_get_instance(cache->partial_slabs.next, slab_t, link); |
| 762 | palkovsky | 293 | list_remove(&slab->link); |
| | | 294 | } |
| | | 295 | obj = slab->start + slab->nextavail * cache->size; |
| | | 296 | slab->nextavail = *((int *)obj); |
| | | 297 | slab->available--; |
| 787 | palkovsky | 298 | |
| 1950 | jermar | 299 | if (!slab->available) |
| 764 | palkovsky | 300 | list_prepend(&slab->link, &cache->full_slabs); |
| 762 | palkovsky | 301 | else |
| 764 | palkovsky | 302 | list_prepend(&slab->link, &cache->partial_slabs); |
| 776 | palkovsky | 303 | |
| | | 304 | spinlock_unlock(&cache->slablock); |
| 787 | palkovsky | 305 | |
| | | 306 | if (cache->constructor && cache->constructor(obj, flags)) { |
| | | 307 | /* Bad, bad, construction failed */ |
| | | 308 | slab_obj_destroy(cache, obj, slab); |
| | | 309 | return NULL; |
| | | 310 | } |
| 762 | palkovsky | 311 | return obj; |
| 759 | palkovsky | 312 | } |
| | | 313 | |
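
The slab-level free list is embedded in the objects themselves: slab_space_alloc() writes the index i+1 into the first machine word of object i, slab_obj_create() pops from the list head (slab->nextavail), and slab_obj_destroy() pushes onto it. Both operations are O(1) and need no extra memory. A standalone model of that index-linked list, assuming a flat buffer instead of kernel frames:

```c
#include <assert.h>
#include <stdio.h>

#define OBJ_SIZE 64	/* must be >= sizeof(int) to hold the link */
#define OBJECTS  8

static char slab_data[OBJECTS * OBJ_SIZE];	/* slab->start */
static int nextavail;				/* slab->nextavail */
static int available;				/* slab->available */

static void slab_init(void)
{
	int i;
	/* Mirror of slab_space_alloc(): object i links to object i+1. */
	for (i = 0; i < OBJECTS; i++)
		*((int *) (slab_data + i * OBJ_SIZE)) = i + 1;
	nextavail = 0;
	available = OBJECTS;
}

static void *slab_pop(void)	/* mirror of slab_obj_create() */
{
	if (!available)
		return NULL;
	void *obj = slab_data + nextavail * OBJ_SIZE;
	nextavail = *((int *) obj);	/* head = head->next */
	available--;
	return obj;
}

static void slab_push(void *obj)	/* mirror of slab_obj_destroy() */
{
	*((int *) obj) = nextavail;	/* obj->next = head */
	nextavail = ((char *) obj - slab_data) / OBJ_SIZE;
	available++;
}

int main(void)
{
	slab_init();
	void *a = slab_pop(), *b = slab_pop();
	assert(a != b);
	slab_push(a);
	assert(slab_pop() == a);	/* LIFO reuse of the hot object */
	slab_push(b);
	printf("available: %d\n", available);
	return 0;
}
```
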
| Rev | Author | Line No. | Line |
|---|---|---|---|
| | | 314 | /**************************************/ |
| | | 315 | /* CPU-Cache slab functions */ |
| | | 316 | |
| | | 317 | /** |
| 781 | palkovsky | 318 | * Find a full magazine in the cache, take it from the list, |
| | | 319 | * and return it |
| | | 320 | * |
| | | 321 | * @param first If true, return the first magazine; otherwise the last |
| | | 322 | */ |
| | | 323 | static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache, |
| | | 324 | int first) |
| | | 325 | { |
| | | 326 | slab_magazine_t *mag = NULL; |
| | | 327 | link_t *cur; |
| | | 328 | |
| | | 329 | spinlock_lock(&cache->maglock); |
| | | 330 | if (!list_empty(&cache->magazines)) { |
| | | 331 | if (first) |
| | | 332 | cur = cache->magazines.next; |
| | | 333 | else |
| | | 334 | cur = cache->magazines.prev; |
| | | 335 | mag = list_get_instance(cur, slab_magazine_t, link); |
| | | 336 | list_remove(&mag->link); |
| | | 337 | atomic_dec(&cache->magazine_counter); |
| | | 338 | } |
| | | 339 | spinlock_unlock(&cache->maglock); |
| | | 340 | return mag; |
| | | 341 | } |
| | | 342 | |
| | | 343 | /** Prepend magazine to the magazine list in cache */ |
| | | 344 | static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag) |
| | | 345 | { |
| | | 346 | spinlock_lock(&cache->maglock); |
| | | 347 | |
| | | 348 | list_prepend(&mag->link, &cache->magazines); |
| | | 349 | atomic_inc(&cache->magazine_counter); |
| | | 350 | |
| | | 351 | spinlock_unlock(&cache->maglock); |
| | | 352 | } |
| | | 353 | |
| | | 354 | /** |
| 759 | palkovsky | 355 | * Free all objects in a magazine and free the memory associated with it |
| | | 356 | * |
| | | 357 | * @return Number of freed pages |
| | | 358 | */ |
| | | 359 | static count_t magazine_destroy(slab_cache_t *cache, |
| | | 360 | slab_magazine_t *mag) |
| | | 361 | { |
| | | 362 | int i; |
| | | 363 | count_t frames = 0; |
| | | 364 | |
| 767 | palkovsky | 365 | for (i=0;i < mag->busy; i++) { |
| 762 | palkovsky | 366 | frames += slab_obj_destroy(cache, mag->objs[i], NULL); |
| 767 | palkovsky | 367 | atomic_dec(&cache->cached_objs); |
| | | 368 | } |
| 759 | palkovsky | 369 | |
| | | 370 | slab_free(&mag_cache, mag); |
| | | 371 | |
| | | 372 | return frames; |
| | | 373 | } |
| | | 374 | |
| | | 375 | /** |
| 769 | palkovsky | 376 | * Find a full magazine, set it as current, and return it |
| | | 377 | * |
| | | 378 | * Assumes the cpu_magazine lock is held |
| | | 379 | */ |
| | | 380 | static slab_magazine_t * get_full_current_mag(slab_cache_t *cache) |
| | | 381 | { |
| | | 382 | slab_magazine_t *cmag, *lastmag, *newmag; |
| | | 383 | |
| | | 384 | cmag = cache->mag_cache[CPU->id].current; |
| | | 385 | lastmag = cache->mag_cache[CPU->id].last; |
| | | 386 | if (cmag) { /* First try local CPU magazines */ |
| | | 387 | if (cmag->busy) |
| | | 388 | return cmag; |
| | | 389 | |
| | | 390 | if (lastmag && lastmag->busy) { |
| | | 391 | cache->mag_cache[CPU->id].current = lastmag; |
| | | 392 | cache->mag_cache[CPU->id].last = cmag; |
| | | 393 | return lastmag; |
| | | 394 | } |
| | | 395 | } |
| | | 396 | /* Local magazines are empty, import one from the magazine list */ |
| 781 | palkovsky | 397 | newmag = get_mag_from_cache(cache, 1); |
| | | 398 | if (!newmag) |
| 769 | palkovsky | 399 | return NULL; |
| | | 400 | |
| | | 401 | if (lastmag) |
| 781 | palkovsky | 402 | magazine_destroy(cache, lastmag); |
| | | 403 | |
| 769 | palkovsky | 404 | cache->mag_cache[CPU->id].last = cmag; |
| | | 405 | cache->mag_cache[CPU->id].current = newmag; |
| | | 406 | return newmag; |
| | | 407 | } |
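
The current/last pair forms a two-slot cache of magazines per CPU: before touching the shared list, the code tries to swap the roles of the two local magazines, which absorbs alloc/free sequences that straddle a magazine boundary. A standalone model of the swap on the allocation path, assuming a trivial magazine struct (the kernel's slab_magazine_t has more fields):

```c
#include <stdio.h>

typedef struct {
	int busy;	/* number of objects currently in the magazine */
} magazine_t;

/* Per-CPU pair, as kept in the cache's mag_cache[] entries. */
static magazine_t *current_mag, *last_mag;

/* Mirror of get_full_current_mag(): return a magazine with objects,
 * swapping current/last if that avoids touching shared state. */
static magazine_t *full_local_mag(void)
{
	if (current_mag && current_mag->busy)
		return current_mag;
	if (last_mag && last_mag->busy) {
		magazine_t *tmp = current_mag;	/* swap roles */
		current_mag = last_mag;
		last_mag = tmp;
		return current_mag;
	}
	return NULL;	/* caller would import from the shared list */
}

int main(void)
{
	magazine_t a = { .busy = 0 }, b = { .busy = 3 };
	current_mag = &a;	/* empty */
	last_mag = &b;		/* has objects */
	magazine_t *m = full_local_mag();
	printf("got %s, current is now %s\n",
	    m == &b ? "b" : "none", current_mag == &b ? "b" : "a");
	return 0;
}
```
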
| Rev | Author | Line No. | Line |
|---|---|---|---|
| | | 408 | |
| | | 409 | /** |
| 759 | palkovsky | 410 | * Try to find an object in the CPU-cache magazines |
| | | 411 | * |
| | | 412 | * @return Pointer to object or NULL if not available |
| | | 413 | */ |
| | | 414 | static void * magazine_obj_get(slab_cache_t *cache) |
| | | 415 | { |
| | | 416 | slab_magazine_t *mag; |
| 767 | palkovsky | 417 | void *obj; |
| 759 | palkovsky | 418 | |
| 772 | palkovsky | 419 | if (!CPU) |
| | | 420 | return NULL; |
| | | 421 | |
| 759 | palkovsky | 422 | spinlock_lock(&cache->mag_cache[CPU->id].lock); |
| | | 423 | |
| 769 | palkovsky | 424 | mag = get_full_current_mag(cache); |
| | | 425 | if (!mag) { |
| | | 426 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
| | | 427 | return NULL; |
| 759 | palkovsky | 428 | } |
| 767 | palkovsky | 429 | obj = mag->objs[--mag->busy]; |
| 759 | palkovsky | 430 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
| 767 | palkovsky | 431 | atomic_dec(&cache->cached_objs); |
| | | 432 | |
| | | 433 | return obj; |
| 759 | palkovsky | 434 | } |
| | | 435 | |
| | | 436 | /** |
| 768 | palkovsky | 437 | * Ensure that the current magazine has free space; return a pointer to it, |
| 769 | palkovsky | 438 | * or NULL if no such magazine is available and one cannot be allocated |
| 759 | palkovsky | 439 | * |
| 773 | palkovsky | 440 | * Assumes mag_cache[CPU->id].lock is held |
| | | 441 | * |
| 759 | palkovsky | 442 | * We have 2 magazines bound to the processor. |
| | | 443 | * First try the current. |
| | | 444 | * If full, try the last. |
| | | 445 | * If that is full too, put it to the magazines list, |
| | | 446 | * allocate a new one, and exchange last & current. |
| | | 447 | * |
| 768 | palkovsky | 448 | */ |
| | | 449 | static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache) |
| | | 450 | { |
| | | 451 | slab_magazine_t *cmag,*lastmag,*newmag; |
| | | 452 | |
| | | 453 | cmag = cache->mag_cache[CPU->id].current; |
| | | 454 | lastmag = cache->mag_cache[CPU->id].last; |
| | | 455 | |
| | | 456 | if (cmag) { |
| | | 457 | if (cmag->busy < cmag->size) |
| | | 458 | return cmag; |
| | | 459 | if (lastmag && lastmag->busy < lastmag->size) { |
| | | 460 | cache->mag_cache[CPU->id].last = cmag; |
| | | 461 | cache->mag_cache[CPU->id].current = lastmag; |
| | | 462 | return lastmag; |
| | | 463 | } |
| | | 464 | } |
| | | 465 | /* current and last are full or nonexistent, allocate a new one */ |
| | | 466 | /* We do not want to sleep just because of caching */ |
| | | 467 | /* Especially we do not want reclaiming to start, as |
| | | 468 | * this would deadlock */ |
| | | 469 | newmag = slab_alloc(&mag_cache, FRAME_ATOMIC \| FRAME_NO_RECLAIM); |
| | | 470 | if (!newmag) |
| | | 471 | return NULL; |
| | | 472 | newmag->size = SLAB_MAG_SIZE; |
| | | 473 | newmag->busy = 0; |
| | | 474 | |
| | | 475 | /* Flush last to the magazine list */ |
| 781 | palkovsky | 476 | if (lastmag) |
| | | 477 | put_mag_to_cache(cache, lastmag); |
| | | 478 | |
| 768 | palkovsky | 479 | /* Move current as last, save new as current */ |
| | | 480 | cache->mag_cache[CPU->id].last = cmag; |
| | | 481 | cache->mag_cache[CPU->id].current = newmag; |
| | | 482 | |
| | | 483 | return newmag; |
| | | 484 | } |
| | | 485 | |
| | | 486 | /** |
| | | 487 | * Put object into the CPU-cache magazine |
| | | 488 | * |
| 759 | palkovsky | 489 | * @return 0 - success, -1 - could not get memory |
| | | 490 | */ |
| | | 491 | static int magazine_obj_put(slab_cache_t *cache, void *obj) |
| | | 492 | { |
| | | 493 | slab_magazine_t *mag; |
| | | 494 | |
| 772 | palkovsky | 495 | if (!CPU) |
| | | 496 | return -1; |
| | | 497 | |
| 759 | palkovsky | 498 | spinlock_lock(&cache->mag_cache[CPU->id].lock); |
| 768 | palkovsky | 499 | |
| | | 500 | mag = make_empty_current_mag(cache); |
| 769 | palkovsky | 501 | if (!mag) { |
| | | 502 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
| | | 503 | return -1; |
| | | 504 | } |
| 759 | palkovsky | 505 | |
| | | 506 | mag->objs[mag->busy++] = obj; |
| | | 507 | |
| | | 508 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
| 767 | palkovsky | 509 | atomic_inc(&cache->cached_objs); |
| 759 | palkovsky | 510 | return 0; |
| | | 511 | } |
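
magazine_obj_get() and magazine_obj_put() reduce the common case to pushing and popping objs[] under a per-CPU lock, giving strict LIFO reuse: the most recently freed, cache-hot object is handed out first. A minimal standalone model of that array discipline (MAG_SIZE stands in for SLAB_MAG_SIZE):

```c
#include <assert.h>
#include <stddef.h>

#define MAG_SIZE 4	/* stands in for SLAB_MAG_SIZE */

typedef struct {
	size_t busy;
	void *objs[MAG_SIZE];
} magazine_t;

/* Mirror of the fast path in magazine_obj_put(). */
static int mag_put(magazine_t *mag, void *obj)
{
	if (mag->busy == MAG_SIZE)
		return -1;	/* full: caller swaps/allocates magazines */
	mag->objs[mag->busy++] = obj;
	return 0;
}

/* Mirror of the fast path in magazine_obj_get(). */
static void *mag_get(magazine_t *mag)
{
	if (!mag->busy)
		return NULL;	/* empty: caller imports a full magazine */
	return mag->objs[--mag->busy];
}

int main(void)
{
	magazine_t mag = { 0 };
	int a, b;
	mag_put(&mag, &a);
	mag_put(&mag, &b);
	assert(mag_get(&mag) == &b);	/* LIFO: last freed comes out first */
	assert(mag_get(&mag) == &a);
	assert(mag_get(&mag) == NULL);
	return 0;
}
```
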
| Rev | Author | Line No. | Line |
|---|---|---|---|
| | | 512 | |
| | | 513 | |
| | | 514 | /**************************************/ |
| 1248 | jermar | 515 | /* Slab cache functions */ |
| 759 | palkovsky | 516 | |
| 762 | palkovsky | 517 | /** Return the number of objects that fit into a slab of the given cache */ |
| | | 518 | static int comp_objects(slab_cache_t *cache) |
| | | 519 | { |
| | | 520 | if (cache->flags & SLAB_CACHE_SLINSIDE) |
| | | 521 | return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size; |
| | | 522 | else |
| | | 523 | return (PAGE_SIZE << cache->order) / cache->size; |
| | | 524 | } |
| | | 525 | |
| | | 526 | /** Return the wasted space in a slab */ |
| | | 527 | static int badness(slab_cache_t *cache) |
| | | 528 | { |
| | | 529 | int objects; |
| | | 530 | int ssize; |
| | | 531 | |
| | | 532 | objects = comp_objects(cache); |
| | | 533 | ssize = PAGE_SIZE << cache->order; |
| | | 534 | if (cache->flags & SLAB_CACHE_SLINSIDE) |
| | | 535 | ssize -= sizeof(slab_t); |
| | | 536 | return ssize - objects*cache->size; |
| | | 537 | } |
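
A worked example of these two formulas as a standalone program; the 4 KiB page, order 0, 96-byte object size, and 32-byte slab_t are illustrative values, not taken from the kernel headers:

```c
#include <stdio.h>

#define PAGE_SIZE 4096

int main(void)
{
	unsigned order = 0;		/* one 4 KiB page per slab */
	unsigned size = 96;		/* object size after alignment */
	unsigned slab_t_size = 32;	/* assumed descriptor size */

	/* comp_objects() with SLAB_CACHE_SLINSIDE: the descriptor eats
	 * into the data area. (4096 - 32) / 96 = 42 objects. */
	unsigned ssize = (PAGE_SIZE << order) - slab_t_size;
	unsigned objects = ssize / size;

	/* badness(): what is left over. 4064 - 42*96 = 32 bytes wasted. */
	unsigned waste = ssize - objects * size;

	printf("objects = %u, wasted = %u bytes\n", objects, waste);
	return 0;
}
```
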
| Rev | Author | Line No. | Line |
|---|---|---|---|
| | | 538 | |
| 789 | palkovsky | 539 | /** |
| | | 540 | * Initialize the mag_cache structure in a slab cache |
| | | 541 | */ |
| | | 542 | static void make_magcache(slab_cache_t *cache) |
| | | 543 | { |
| | | 544 | int i; |
| 791 | palkovsky | 545 | |
| | | 546 | ASSERT(_slab_initialized >= 2); |
| 789 | palkovsky | 547 | |
| 822 | palkovsky | 548 | cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0); |
| 789 | palkovsky | 549 | for (i=0; i < config.cpu_count; i++) { |
| 1780 | jermar | 550 | memsetb((uintptr_t)&cache->mag_cache[i], |
| 789 | palkovsky | 551 | sizeof(cache->mag_cache[i]), 0); |
| | | 552 | spinlock_initialize(&cache->mag_cache[i].lock, |
| | | 553 | "slab_maglock_cpu"); |
| | | 554 | } |
| | | 555 | } |
| | | 556 | |
| 759 | palkovsky | 557 | /** Initialize allocated memory as a slab cache */ |
| | | 558 | static void |
| | | 559 | _slab_cache_create(slab_cache_t *cache, |
| | | 560 | char *name, |
| | | 561 | size_t size, |
| | | 562 | size_t align, |
| | | 563 | int (*constructor)(void *obj, int kmflag), |
| 787 | palkovsky | 564 | int (*destructor)(void *obj), |
| 759 | palkovsky | 565 | int flags) |
| | | 566 | { |
| 771 | palkovsky | 567 | int pages; |
| 783 | palkovsky | 568 | ipl_t ipl; |
| 759 | palkovsky | 569 | |
| 1780 | jermar | 570 | memsetb((uintptr_t)cache, sizeof(*cache), 0); |
| 759 | palkovsky | 571 | cache->name = name; |
| | | 572 | |
| 1780 | jermar | 573 | if (align < sizeof(unative_t)) |
| | | 574 | align = sizeof(unative_t); |
| 766 | palkovsky | 575 | size = ALIGN_UP(size, align); |
| | | 576 | |
| 762 | palkovsky | 577 | cache->size = size; |
| 759 | palkovsky | 578 | |
| | | 579 | cache->constructor = constructor; |
| | | 580 | cache->destructor = destructor; |
| | | 581 | cache->flags = flags; |
| | | 582 | |
| | | 583 | list_initialize(&cache->full_slabs); |
| | | 584 | list_initialize(&cache->partial_slabs); |
| | | 585 | list_initialize(&cache->magazines); |
| 776 | palkovsky | 586 | spinlock_initialize(&cache->slablock, "slab_lock"); |
| | | 587 | spinlock_initialize(&cache->maglock, "slab_maglock"); |
| 789 | palkovsky | 588 | if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) |
| | | 589 | make_magcache(cache); |
| 759 | palkovsky | 590 | |
| | | 591 | /* Compute slab sizes, object counts in slabs etc. */ |
| | | 592 | if (cache->size < SLAB_INSIDE_SIZE) |
| | | 593 | cache->flags \|= SLAB_CACHE_SLINSIDE; |
| | | 594 | |
| 762 | palkovsky | 595 | /* Minimum slab order */ |
| 1682 | palkovsky | 596 | pages = SIZE2FRAMES(cache->size); |
| 1677 | palkovsky | 597 | /* We need 2^order >= pages */ |
| | | 598 | if (pages == 1) |
| | | 599 | cache->order = 0; |
| | | 600 | else |
| | | 601 | cache->order = fnzb(pages-1)+1; |
| 766 | palkovsky | 602 | |
| 762 | palkovsky | 603 | while (badness(cache) > SLAB_MAX_BADNESS(cache)) { |
| | | 604 | cache->order += 1; |
| | | 605 | } |
| | | 606 | cache->objects = comp_objects(cache); |
| 766 | palkovsky | 607 | /* If the info fits in, put it inside */ |
| | | 608 | if (badness(cache) > sizeof(slab_t)) |
| | | 609 | cache->flags \|= SLAB_CACHE_SLINSIDE; |
| 762 | palkovsky | 610 | |
| 783 | palkovsky | 611 | /* Add the cache to the cache list */ |
| | | 612 | ipl = interrupts_disable(); |
| 759 | palkovsky | 613 | spinlock_lock(&slab_cache_lock); |
| | | 614 | |
| | | 615 | list_append(&cache->link, &slab_cache_list); |
| | | 616 | |
| | | 617 | spinlock_unlock(&slab_cache_lock); |
| 783 | palkovsky | 618 | interrupts_restore(ipl); |
| | | 619 | } |
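
The order computation above is the standard ceil-log2 idiom: for pages >= 2, fnzb(pages-1)+1 yields the smallest order such that 2^order >= pages. A standalone check, with a portable stand-in for the kernel's fnzb() (assumed here to return the index of the highest set bit, matching its use in this file):

```c
#include <assert.h>
#include <stdio.h>

/* Stand-in for the kernel's fnzb(): index of the highest set bit. */
static unsigned fnzb(unsigned x)
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

/* Mirror of the order computation in _slab_cache_create(). */
static unsigned slab_order(unsigned pages)
{
	return (pages == 1) ? 0 : fnzb(pages - 1) + 1;
}

int main(void)
{
	/* 2^order is the smallest power of two >= pages. */
	assert(slab_order(1) == 0);	/* 1 page  -> order 0 */
	assert(slab_order(2) == 1);	/* 2 pages -> order 1 */
	assert(slab_order(3) == 2);	/* 3 pages -> order 2 (4 pages) */
	assert(slab_order(4) == 2);
	assert(slab_order(5) == 3);	/* 5 pages -> order 3 (8 pages) */
	printf("order checks passed\n");
	return 0;
}
```
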
| Rev | Author | Line No. | Line |
|---|---|---|---|
| | | 620 | |
| | | 621 | /** Create slab cache */ |
| | | 622 | slab_cache_t * slab_cache_create(char *name, |
| | | 623 | size_t size, |
| | | 624 | size_t align, |
| | | 625 | int (*constructor)(void *obj, int kmflag), |
| 787 | palkovsky | 626 | int (*destructor)(void *obj), |
| 759 | palkovsky | 627 | int flags) |
| | | 628 | { |
| | | 629 | slab_cache_t *cache; |
| | | 630 | |
| 769 | palkovsky | 631 | cache = slab_alloc(&slab_cache_cache, 0); |
| 759 | palkovsky | 632 | _slab_cache_create(cache, name, size, align, constructor, destructor, |
| | | 633 | flags); |
| | | 634 | return cache; |
| | | 635 | } |
| | | 636 | |
| | | 637 | /** |
| | | 638 | * Reclaim space occupied by objects that are already free |
| | | 639 | * |
| | | 640 | * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing |
| | | 641 | * @return Number of freed pages |
| | | 642 | */ |
| | | 643 | static count_t _slab_reclaim(slab_cache_t *cache, int flags) |
| | | 644 | { |
| | | 645 | int i; |
| | | 646 | slab_magazine_t *mag; |
| | | 647 | count_t frames = 0; |
| 781 | palkovsky | 648 | int magcount; |
| 759 | palkovsky | 649 | |
| | | 650 | if (cache->flags & SLAB_CACHE_NOMAGAZINE) |
| | | 651 | return 0; /* Nothing to do */ |
| 781 | palkovsky | 652 | |
| | | 653 | /* We count up to the original magazine count to avoid |
| | | 654 | * an endless loop |
| | | 655 | */ |
| | | 656 | magcount = atomic_get(&cache->magazine_counter); |
| | | 657 | while (magcount-- && (mag=get_mag_from_cache(cache,0))) { |
| | | 658 | frames += magazine_destroy(cache,mag); |
| | | 659 | if (!(flags & SLAB_RECLAIM_ALL) && frames) |
| | | 660 | break; |
| 769 | palkovsky | 661 | } |
| 759 | palkovsky | 662 | |
| | | 663 | if (flags & SLAB_RECLAIM_ALL) { |
| 781 | palkovsky | 664 | /* Free cpu-bound magazines */ |
| 759 | palkovsky | 665 | /* Destroy CPU magazines */ |
| | | 666 | for (i=0; i<config.cpu_count; i++) { |
| 781 | palkovsky | 667 | spinlock_lock(&cache->mag_cache[i].lock); |
| | | 668 | |
| 759 | palkovsky | 669 | mag = cache->mag_cache[i].current; |
| | | 670 | if (mag) |
| | | 671 | frames += magazine_destroy(cache, mag); |
| | | 672 | cache->mag_cache[i].current = NULL; |
| | | 673 | |
| | | 674 | mag = cache->mag_cache[i].last; |
| | | 675 | if (mag) |
| | | 676 | frames += magazine_destroy(cache, mag); |
| | | 677 | cache->mag_cache[i].last = NULL; |
| 781 | palkovsky | 678 | |
| | | 679 | spinlock_unlock(&cache->mag_cache[i].lock); |
| 759 | palkovsky | 680 | } |
| | | 681 | } |
| 767 | palkovsky | 682 | |
| 759 | palkovsky | 683 | return frames; |
| | | 684 | } |
| | | 685 | |
| | | 686 | /** Check that there are no slabs and remove the cache from the system */ |
| | | 687 | void slab_cache_destroy(slab_cache_t *cache) |
| | | 688 | { |
| 781 | palkovsky | 689 | ipl_t ipl; |
| | | 690 | |
| | | 691 | /* First remove the cache from the list, so that we don't need |
| | | 692 | * to disable interrupts later |
| | | 693 | */ |
| | | 694 | |
| | | 695 | ipl = interrupts_disable(); |
| | | 696 | spinlock_lock(&slab_cache_lock); |
| | | 697 | |
| | | 698 | list_remove(&cache->link); |
| | | 699 | |
| | | 700 | spinlock_unlock(&slab_cache_lock); |
| | | 701 | interrupts_restore(ipl); |
| | | 702 | |
| 759 | palkovsky | 703 | /* Do not lock anything, we assume the software is correct and |
| | | 704 | * does not touch the cache when it decides to destroy it */ |
| | | 705 | |
| | | 706 | /* Destroy all magazines */ |
| | | 707 | _slab_reclaim(cache, SLAB_RECLAIM_ALL); |
| | | 708 | |
| | | 709 | /* All slabs must be empty */ |
| | | 710 | if (!list_empty(&cache->full_slabs) \ |
| | | 711 | \|\| !list_empty(&cache->partial_slabs)) |
| | | 712 | panic("Destroying cache that is not empty."); |
| | | 713 | |
| 789 | palkovsky | 714 | if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) |
| 822 | palkovsky | 715 | free(cache->mag_cache); |
| 769 | palkovsky | 716 | slab_free(&slab_cache_cache, cache); |
| 759 | palkovsky | 717 | } |
| | | 718 | |
| | | 719 | /** Allocate a new object from the cache - if no flags are given, it always |
| | | 720 | returns memory */ |
| | | 721 | void * slab_alloc(slab_cache_t *cache, int flags) |
| | | 722 | { |
| | | 723 | ipl_t ipl; |
| | | 724 | void *result = NULL; |
| 773 | palkovsky | 725 | |
| 759 | palkovsky | 726 | /* Disable interrupts to avoid deadlocks with interrupt handlers */ |
| | | 727 | ipl = interrupts_disable(); |
| 771 | palkovsky | 728 | |
| 814 | palkovsky | 729 | if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) { |
| 759 | palkovsky | 730 | result = magazine_obj_get(cache); |
| 814 | palkovsky | 731 | } |
| 776 | palkovsky | 732 | if (!result) |
| 759 | palkovsky | 733 | result = slab_obj_create(cache, flags); |
| | | 734 | |
| 769 | palkovsky | 735 | interrupts_restore(ipl); |
| | | 736 | |
| 764 | palkovsky | 737 | if (result) |
| | | 738 | atomic_inc(&cache->allocated_objs); |
| | | 739 | |
| 759 | palkovsky | 740 | return result; |
| | | 741 | } |
| | | 742 | |
| 771 | palkovsky | 743 | /** Return an object to the cache, using the slab if known */ |
| | | 744 | static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab) |
| 759 | palkovsky | 745 | { |
| | | 746 | ipl_t ipl; |
| | | 747 | |
| | | 748 | ipl = interrupts_disable(); |
| | | 749 | |
| 762 | palkovsky | 750 | if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \ |
| | | 751 | \|\| magazine_obj_put(cache, obj)) { |
| 776 | palkovsky | 752 | |
| 771 | palkovsky | 753 | slab_obj_destroy(cache, obj, slab); |
| 776 | palkovsky | 754 | |
| 759 | palkovsky | 755 | } |
| 769 | palkovsky | 756 | interrupts_restore(ipl); |
| 764 | palkovsky | 757 | atomic_dec(&cache->allocated_objs); |
| 759 | palkovsky | 758 | } |
| | | 759 | |
| 771 | palkovsky | 760 | /** Return a slab object to the cache */ |
| | | 761 | void slab_free(slab_cache_t *cache, void *obj) |
| | | 762 | { |
| | | 763 | _slab_free(cache,obj,NULL); |
| | | 764 | } |
| | | 765 | |
| 759 | palkovsky | 766 | /* Go through all caches and reclaim what is possible */ |
| | | 767 | count_t slab_reclaim(int flags) |
| | | 768 | { |
| | | 769 | slab_cache_t *cache; |
| | | 770 | link_t *cur; |
| | | 771 | count_t frames = 0; |
| | | 772 | |
| | | 773 | spinlock_lock(&slab_cache_lock); |
| | | 774 | |
| 776 | palkovsky | 775 | /* TODO: Add an assert that interrupts are disabled, otherwise |
| | | 776 | * memory allocation from interrupts can deadlock. |
| | | 777 | */ |
| | | 778 | |
| 759 | palkovsky | 779 | for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) { |
| | | 780 | cache = list_get_instance(cur, slab_cache_t, link); |
| | | 781 | frames += _slab_reclaim(cache, flags); |
| | | 782 | } |
| | | 783 | |
| | | 784 | spinlock_unlock(&slab_cache_lock); |
| | | 785 | |
| | | 786 | return frames; |
| | | 787 | } |
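
As the header comment describes, the frame allocator is expected to call slab_reclaim() when it runs out of frames: first a light pass, then, if nothing came back, the brutal pass. A hedged sketch of that caller side; the retry policy and the try_reclaim_frames() helper are illustrative, not code from this file:

```c
/* Illustrative caller: what a frame-allocation slow path could do
 * with slab_reclaim() (kernel context assumed). */
count_t try_reclaim_frames(void)
{
	/* Light pass: stops once something is freed in each cache. */
	count_t freed = slab_reclaim(0);
	if (freed)
		return freed;

	/* Brutal pass: flush CPU-bound magazines too. */
	return slab_reclaim(SLAB_RECLAIM_ALL);
}
```
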
| Rev | Author | Line No. | Line |
|---|---|---|---|
| | | 788 | |
| | | 789 | |
| | | 790 | /* Print the list of slab caches */ |
| | | 791 | void slab_print_list(void) |
| | | 792 | { |
| | | 793 | slab_cache_t *cache; |
| | | 794 | link_t *cur; |
| 783 | palkovsky | 795 | ipl_t ipl; |
| | | 796 | |
| | | 797 | ipl = interrupts_disable(); |
| 759 | palkovsky | 798 | spinlock_lock(&slab_cache_lock); |
| 2052 | decky | 799 | printf("slab name size pages obj/pg slabs cached allocated ctl\n"); |
| | | 800 | printf("---------------- -------- ------ ------ ------ ------ --------- ---\n"); |
| | | 801 | |
| | | 802 | for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) { |
| 759 | palkovsky | 803 | cache = list_get_instance(cur, slab_cache_t, link); |
| 2052 | decky | 804 | |
| | | 805 | printf("%-16s %8zd %6zd %6zd %6zd %6zd %9zd %-3s\n", cache->name, cache->size, (1 << cache->order), cache->objects, atomic_get(&cache->allocated_slabs), atomic_get(&cache->cached_objs), atomic_get(&cache->allocated_objs), cache->flags & SLAB_CACHE_SLINSIDE ? "in" : "out"); |
| 759 | palkovsky | 806 | } |
| | | 807 | spinlock_unlock(&slab_cache_lock); |
| 783 | palkovsky | 808 | interrupts_restore(ipl); |
| 759 | palkovsky | 809 | } |
| | | 810 | |
| | | 811 | void slab_cache_init(void) |
| | | 812 | { |
| 771 | palkovsky | 813 | int i, size; |
| | | 814 | |
| 759 | palkovsky | 815 | /* Initialize magazine cache */ |
| | | 816 | _slab_cache_create(&mag_cache, |
| | | 817 | "slab_magazine", |
| | | 818 | sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*), |
| 1780 | jermar | 819 | sizeof(uintptr_t), |
| 759 | palkovsky | 820 | NULL, NULL, |
| 769 | palkovsky | 821 | SLAB_CACHE_NOMAGAZINE \| SLAB_CACHE_SLINSIDE); |
| | | 822 | /* Initialize slab_cache cache */ |
| | | 823 | _slab_cache_create(&slab_cache_cache, |
| | | 824 | "slab_cache", |
| 789 | palkovsky | 825 | sizeof(slab_cache_cache), |
| 1780 | jermar | 826 | sizeof(uintptr_t), |
| 769 | palkovsky | 827 | NULL, NULL, |
| | | 828 | SLAB_CACHE_NOMAGAZINE \| SLAB_CACHE_SLINSIDE); |
| | | 829 | /* Initialize external slab cache */ |
| | | 830 | slab_extern_cache = slab_cache_create("slab_extern", |
| | | 831 | sizeof(slab_t), |
| | | 832 | 0, NULL, NULL, |
| 789 | palkovsky | 833 | SLAB_CACHE_SLINSIDE \| SLAB_CACHE_MAGDEFERRED); |
| 759 | palkovsky | 834 | |
| | | 835 | /* Initialize structures for malloc */ |
| 771 | palkovsky | 836 | for (i=0, size=(1<<SLAB_MIN_MALLOC_W); |
| | | 837 | i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1); |
| | | 838 | i++, size <<= 1) { |
| | | 839 | malloc_caches[i] = slab_cache_create(malloc_names[i], |
| | | 840 | size, 0, |
| 789 | palkovsky | 841 | NULL,NULL, SLAB_CACHE_MAGDEFERRED); |
| 771 | palkovsky | 842 | } |
| 778 | palkovsky | 843 | #ifdef CONFIG_DEBUG |
| | | 844 | _slab_initialized = 1; |
| | | 845 | #endif |
| 759 | palkovsky | 846 | } |
| 771 | palkovsky | 847 | |
| 789 | palkovsky | 848 | /** Enable cpu_cache |
| | | 849 | * |
| | | 850 | * The kernel calls this function when it knows the real number of |
| | | 851 | * processors. |
| | | 852 | * Allocate the magazine cache and enable it on all existing |
| | | 853 | * caches that are SLAB_CACHE_MAGDEFERRED |
| | | 854 | */ |
| | | 855 | void slab_enable_cpucache(void) |
| | | 856 | { |
| | | 857 | link_t *cur; |
| | | 858 | slab_cache_t *s; |
| | | 859 | |
| 791 | palkovsky | 860 | #ifdef CONFIG_DEBUG |
| | | 861 | _slab_initialized = 2; |
| | | 862 | #endif |
| | | 863 | |
| 789 | palkovsky | 864 | spinlock_lock(&slab_cache_lock); |
| | | 865 | |
| | | 866 | for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){ |
| | | 867 | s = list_get_instance(cur, slab_cache_t, link); |
| | | 868 | if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED) |
| | | 869 | continue; |
| | | 870 | make_magcache(s); |
| | | 871 | s->flags &= ~SLAB_CACHE_MAGDEFERRED; |
| | | 872 | } |
| | | 873 | |
| | | 874 | spinlock_unlock(&slab_cache_lock); |
| | | 875 | } |
| | | 876 | |
| 771 | palkovsky | 877 | /**************************************/ |
| | | 878 | /* malloc/free functions */ |
| 822 | palkovsky | 879 | void * malloc(unsigned int size, int flags) |
| 771 | palkovsky | 880 | { |
| | | 881 | int idx; |
| 778 | palkovsky | 882 | |
| | | 883 | ASSERT(_slab_initialized); |
| 1288 | jermar | 884 | ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W)); |
| 771 | palkovsky | 885 | |
| | | 886 | if (size < (1 << SLAB_MIN_MALLOC_W)) |
| | | 887 | size = (1 << SLAB_MIN_MALLOC_W); |
| | | 888 | |
| | | 889 | idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1; |
| | | 890 | |
| | | 891 | return slab_alloc(malloc_caches[idx], flags); |
| | | 892 | } |
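
malloc() rounds the request up to the next power of two and picks the matching cache: fnzb(size-1)+1 is again the ceil-log2 idiom, shifted by SLAB_MIN_MALLOC_W. A standalone check, assuming SLAB_MIN_MALLOC_W = 4 (2^4 = 16 bytes, matching the "malloc-16" name at the head of malloc_names[] above); the real value comes from the kernel headers:

```c
#include <assert.h>
#include <stdio.h>

#define SLAB_MIN_MALLOC_W 4	/* assumed: 2^4 = 16 == "malloc-16" */

static unsigned fnzb(unsigned x)	/* highest set bit, as before */
{
	unsigned n = 0;
	while (x >>= 1)
		n++;
	return n;
}

/* Mirror of the cache-index computation in malloc(). */
static unsigned malloc_idx(unsigned size)
{
	if (size < (1u << SLAB_MIN_MALLOC_W))
		size = 1u << SLAB_MIN_MALLOC_W;
	return fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;
}

int main(void)
{
	assert(malloc_idx(16) == 0);	/* -> "malloc-16" */
	assert(malloc_idx(17) == 1);	/* -> "malloc-32" */
	assert(malloc_idx(100) == 3);	/* -> "malloc-128" */
	assert(malloc_idx(4096) == 8);	/* -> "malloc-4K" */
	printf("index checks passed\n");
	return 0;
}
```
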
| Rev | Author | Line No. | Line |
|---|---|---|---|
| | | 893 | |
| 822 | palkovsky | 894 | void free(void *obj) |
| 771 | palkovsky | 895 | { |
| 781 | palkovsky | 896 | slab_t *slab; |
| | | 897 | |
| 1950 | jermar | 898 | if (!obj) |
| | | 899 | return; |
| 781 | palkovsky | 900 | |
| | | 901 | slab = obj2slab(obj); |
| 771 | palkovsky | 902 | _slab_free(slab->cache, obj, slab); |
| | | 903 | } |
| 1702 | cejka | 904 | |
| 1757 | jermar | 905 | /** @} |
| 1702 | cejka | 906 | */ |