/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * - empty SLABS are deallocated immediately
 *   (in Linux they are kept in a linked list, in Solaris ???)
 * - empty magazines are deallocated when not needed
 *   (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy
 * to do:
 * - cache coloring
 * - dynamic magazine growing (different magazine sizes are already
 *   supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first checked whether it is
 * available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared SLAB - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if the allocation
 * fails, the object is deallocated into the SLAB). If the magazine is full,
 * it is put into the CPU-shared list of magazines and a new one is
 * allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty SLABS are immediately freed (thrashing is avoided because
 * of magazines).
 *
 * The SLAB information structure is kept inside the data area, if possible.
 * The cache can be marked that it should not use magazines. This is used
 * only for SLAB related caches to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control
 * structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list, until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * TODO: For better CPU-scaling the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases competition for the single
 * per-system magazine cache.
 *
 * - it might be good to add granularity of locks even to slab level,
 *   we could then try_spinlock over all partial slabs and thus improve
 *   scalability even on slab level
 */
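
/*
 * A minimal usage sketch of the public interface (illustrative only;
 * foo_t and the zero flag/alignment arguments are hypothetical caller
 * choices, not something this file prescribes):
 *
 *    slab_cache_t *foo_cache;
 *    foo_t *foo;
 *
 *    foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
 *                                  NULL, NULL, 0);
 *    foo = slab_alloc(foo_cache, 0);
 *    ...
 *    slab_free(foo_cache, foo);
 *    slab_cache_destroy(foo_cache);
 */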

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want per-cpu cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
    "malloc-16", "malloc-32", "malloc-64", "malloc-128",
    "malloc-256", "malloc-512", "malloc-1K", "malloc-2K",
    "malloc-4K", "malloc-8K", "malloc-16K", "malloc-32K",
    "malloc-64K", "malloc-128K"
};

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache;    /**< Pointer to parent cache */
    link_t link;            /**< List of full/partial slabs */
    void *start;            /**< Start address of first available item */
    count_t available;      /**< Count of available items in this slab */
    index_t nextavail;      /**< The index of next available item */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif
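
/*
 * Slab layout sketch: for SLAB_CACHE_SLINSIDE caches, slab_space_alloc()
 * below places the slab_t descriptor at the very end of the slab's frame
 * block (object count and widths are illustrative):
 *
 *    +-------+-------+-- ... --+---------+--------+
 *    | obj 0 | obj 1 |         | obj N-1 | slab_t |
 *    +-------+-------+-- ... --+---------+--------+
 *    ^ slab->start                       ^ data + fsize - sizeof(slab_t)
 *
 * Caches without SLAB_CACHE_SLINSIDE allocate the descriptor separately
 * from slab_extern_cache.
 */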

/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i = 0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data + i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

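    /* Build the implicit free list: each free object's first word
     * stores the index of the next free object */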
    for (i = 0; i < cache->objects; i++)
        *((int *) (slab->start + i*cache->size)) = i+1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */

/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known directly by the caller,
 *             otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
    int freed = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    if (cache->destructor)
        freed = cache->destructor(obj);

    spinlock_lock(&cache->slablock);
    ASSERT(slab->available < cache->objects);

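    /* Push the object back onto the slab's free list */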
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        spinlock_unlock(&cache->slablock);

        return freed + slab_space_free(cache, slab);

    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    spinlock_unlock(&cache->slablock);
    return freed;
}

/**
 * Take new object from slab or create new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why we should get recursion at most 1-level deep
         */
        spinlock_unlock(&cache->slablock);
        slab = slab_space_alloc(cache, flags);
        if (!slab)
            return NULL;
        spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                                 slab_t,
                                 link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;

    if (! slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);

    if (cache->constructor && cache->constructor(obj, flags)) {
        /* Bad, bad, construction failed */
        slab_obj_destroy(cache, obj, slab);
        return NULL;
    }
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Finds a full magazine in the cache, takes it off the list,
 * and returns it
 *
 * @param first If true, return first, else last mag
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
                                            int first)
{
    slab_magazine_t *mag = NULL;
    link_t *cur;

    spinlock_lock(&cache->maglock);
    if (!list_empty(&cache->magazines)) {
        if (first)
            cur = cache->magazines.next;
        else
            cur = cache->magazines.prev;
        mag = list_get_instance(cur, slab_magazine_t, link);
        list_remove(&mag->link);
        atomic_dec(&cache->magazine_counter);
    }
    spinlock_unlock(&cache->maglock);
    return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
    spinlock_lock(&cache->maglock);

    list_prepend(&mag->link, &cache->magazines);
    atomic_inc(&cache->magazine_counter);

    spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i = 0; i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from magazine list */
    newmag = get_mag_from_cache(cache, 1);
    if (!newmag)
        return NULL;

    if (lastmag)
        magazine_destroy(cache, lastmag);

    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    if (!CPU)
        return NULL;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
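    /* Pop the topmost object from the magazine (LIFO order) */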
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Assure that the current magazine has some free space, return pointer to it,
 * or NULL if no such magazine exists and a new one cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to the processor.
 * First try the current.
 * If full, try the last.
 * If full, put it to the magazines list.
 * Allocate new, exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current | last are full | nonexistent, allocate new */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to magazine list */
    if (lastmag)
        put_mag_to_cache(cache, lastmag);

    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    if (!CPU)
        return -1;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}

/**************************************/
/* SLAB CACHE functions */

/** Return number of objects that fit in certain cache size */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
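
/*
 * A quick worked example with illustrative numbers (assuming PAGE_SIZE ==
 * 4096 and sizeof(slab_t) == 32, neither guaranteed here): an order-0 cache
 * of 256-byte objects with SLAB_CACHE_SLINSIDE gives
 * comp_objects() = (4096 - 32) / 256 = 15 objects and
 * badness() = (4096 - 32) - 15 * 256 = 224 wasted bytes per slab.
 */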

/**
 * Initialize mag_cache structure in slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
    int i;

    ASSERT(_slab_initialized >= 2);

    cache->mag_cache = kalloc(sizeof(slab_mag_cache_t)*config.cpu_count, 0);
    for (i = 0; i < config.cpu_count; i++) {
        memsetb((__address)&cache->mag_cache[i],
                sizeof(cache->mag_cache[i]), 0);
        spinlock_initialize(&cache->mag_cache[i].lock,
                            "slab_maglock_cpu");
    }
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   char *name,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   int (*destructor)(void *obj),
                   int flags)
{
    int pages;
    ipl_t ipl;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->slablock, "slab_lock");
    spinlock_initialize(&cache->maglock, "slab_maglock");
    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
        make_magcache(cache);

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
    cache->order = fnzb(pages);

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Add cache to cache list */
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 int (*destructor)(void *obj),
                                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
                       flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    count_t frames = 0;
    int magcount;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* We count up to the original magazine count to avoid
     * an endless loop
     */
    magcount = atomic_get(&cache->magazine_counter);
    while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
        frames += magazine_destroy(cache, mag);
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    if (flags & SLAB_RECLAIM_ALL) {
        /* Destroy CPU-bound magazines */
        for (i = 0; i < config.cpu_count; i++) {
            spinlock_lock(&cache->mag_cache[i].lock);

            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;

            spinlock_unlock(&cache->mag_cache[i].lock);
        }
    }

    return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    ipl_t ipl;

    /* First remove the cache from the cache list, so that we don't need
     * to disable interrupts later
     */
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_remove(&cache->link);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);

    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        kfree(cache->mag_cache);
    slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        result = magazine_obj_get(cache);
    if (!result)
        result = slab_obj_create(cache, flags);

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
        || magazine_obj_put(cache, obj)) {
        slab_obj_destroy(cache, obj, slab);
    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    /* TODO: Add assert, that interrupts are disabled, otherwise
     * memory allocation from interrupts can deadlock.
     */

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}
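
/*
 * A sketch of the intended caller (hypothetical code; per the comment at the
 * top of this file, the real call site is the frame allocator reacting to a
 * failed frame allocation):
 *
 *    count_t frames = slab_reclaim(0);                 // light reclaim first
 *    if (!frames)
 *        frames = slab_reclaim(SLAB_RECLAIM_ALL);      // then brutal reclaim
 */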

/* Print list of slab caches */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
                       "slab_magazine",
                       sizeof(slab_magazine_t) + SLAB_MAG_SIZE*sizeof(void*),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
                       "slab_cache",
                       sizeof(slab_cache_cache),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                                          sizeof(slab_t),
                                          0, NULL, NULL,
                                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

    /* Initialize structures for malloc */
    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                                             size, 0,
                                             NULL, NULL, SLAB_CACHE_MAGDEFERRED);
    }
#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors.
 * Allocate slab for cpucache and enable it on all existing
 * slabs that are SLAB_CACHE_MAGDEFERRED
 */
void slab_enable_cpucache(void)
{
    link_t *cur;
    slab_cache_t *s;

#ifdef CONFIG_DEBUG
    _slab_initialized = 2;
#endif

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        s = list_get_instance(cur, slab_cache_t, link);
        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
            continue;
        make_magcache(s);
        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
    }

    spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* kalloc/kfree functions */
void * kalloc(unsigned int size, int flags)
{
    int idx;

    ASSERT(_slab_initialized);
    ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

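    /* Pick the smallest power-of-2 cache that fits: fnzb(size-1) + 1 is
     * ceil(log2(size)); e.g. with SLAB_MIN_MALLOC_W == 4 (matching the
     * first cache, malloc-16), size == 100 gives fnzb(99) == 6 and
     * idx == 3, i.e. the "malloc-128" cache. */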
    idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}


void kfree(void *obj)
{
    slab_t *slab;

    if (!obj)
        return;

    slab = obj2slab(obj);
    _slab_free(slab->cache, obj, slab);
}