/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * - empty SLABs are deallocated immediately
 *   (in Linux they are kept in a linked list, in Solaris ???)
 * - empty magazines are deallocated when not needed
 *   (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy
 * to add:
 * - cache coloring
 * - dynamic magazine growing (different magazine sizes are already
 *   supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, we first check whether it is
 * available in the CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared SLAB - if a partially full slab is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound
 * magazine. If there is no such magazine, a new one is allocated (and if
 * that fails, the object is deallocated into the SLAB). If the magazine
 * is full, it is put onto the CPU-shared list of magazines and a new one
 * is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines, in order to
 * avoid thrashing when somebody is allocating/deallocating 1 item at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty SLABs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The SLAB information structure is kept inside the data area, if
 * possible. A cache can be marked so that it does not use magazines.
 * This is used only for SLAB-related caches, to avoid deadlocks and
 * infinite recursion (the SLAB allocator uses itself for allocating all
 * of its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * That tries 'light reclaim' first, then brutal reclaim. The light
 * reclaim releases slabs from the CPU-shared magazine list until at
 * least 1 slab is deallocated in each cache (this algorithm should
 * probably change). The brutal reclaim removes all cached objects, even
 * from CPU-bound magazines.
 *
 * TODO: For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if a cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine list', which would decrease contention on the single
 * system-wide magazine cache.
 *
 * TODO: It might be good to add lock granularity even at the slab level;
 * we could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 */
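
/*
 * Illustrative usage sketch (not part of the original sources; the type
 * and names below are hypothetical): a subsystem creates a cache for its
 * fixed-size objects once, then allocates and frees through it.
 *
 *	typedef struct {
 *		int id;
 *		char payload[60];
 *	} foo_t;
 *
 *	static slab_cache_t *foo_cache;
 *
 *	void foo_subsystem_init(void)
 *	{
 *		// default alignment, no constructor/destructor, no flags
 *		foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
 *					      NULL, NULL, 0);
 *	}
 *
 *	void foo_use(void)
 *	{
 *		foo_t *foo = slab_alloc(foo_cache, 0);
 *		// ... use foo ...
 *		slab_free(foo_cache, foo);  // likely lands in a CPU magazine
 *	}
 */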

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using the SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
	"malloc-16", "malloc-32", "malloc-64", "malloc-128",
	"malloc-256", "malloc-512", "malloc-1K", "malloc-2K",
	"malloc-4K", "malloc-8K", "malloc-16K", "malloc-32K",
	"malloc-64K", "malloc-128K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< List of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< Index of the next available item */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize them
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	int status;
	pfn_t pfn;
	int zone = 0;

	pfn = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
	data = (void *) PA2KA(PFN2ADDR(pfn));
	if (status != FRAME_OK) {
		return NULL;
	}
	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(ADDR2PFN(KA2PA(data)));
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	for (i = 0; i < (1 << cache->order); i++)
		frame_set_parent(pfn + i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	/* Build the in-slab free list: the first word of each free object
	 * holds the index of the next free object */
	for (i = 0; i < cache->objects; i++)
		*((int *) (slab->start + i*cache->size)) = i + 1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(ADDR2PFN(KA2PA(slab->start)));
	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* SLAB functions */

/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known directly by the caller, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

	/* Push the object back onto the slab's free list */
	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start) / cache->size;
	slab->available++;

	/* Move it to the correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}

/**
 * Take a new object from the slab or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1 level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;

	if (!slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Find a full magazine in the cache, take it from the list
 * and return it
 *
 * @param first If true, return the first magazine on the list, else the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
					    int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in the magazine and free the memory associated with it
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assumes the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from the magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Ensure that the current magazine has room for another object and
 * return a pointer to it, or return NULL if no non-full magazine is
 * available and a new one cannot be allocated
 *
 * Assumes mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to each processor.
 * First try the current.
 * If full, try the last.
 * If full, put it on the magazine list,
 * allocate a new one and exchange last & current.
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching; especially we
	 * do not want reclaiming to start, as this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to the magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

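/*
 * Why a pair of magazines (illustrative sketch of the boundary case):
 * with a single CPU-bound magazine, a caller alternating free/alloc
 * exactly at the point where the magazine is full would flush it to the
 * shared list on every free and import it back on every alloc. With the
 * current/last pair, such alternation only toggles the fill level of
 * 'current' (or swaps it with 'last'), so no shared-list traffic occurs.
 */
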
/**
 * Put object into the CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}

/**************************************/
/* SLAB CACHE functions */

/** Return number of objects that fit into a slab of the given cache */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects * cache->size;
}

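/*
 * Worked example (illustrative; assumes PAGE_SIZE == 4096 and
 * sizeof(slab_t) == 32, both architecture-dependent): an SLINSIDE cache
 * with size == 96 and order == 0 has ssize == 4096 - 32 == 4064,
 * objects == 4064 / 96 == 42, and thus
 * badness == 4064 - 42 * 96 == 32 wasted bytes per slab.
 */
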
/**
 * Initialize the mag_cache structure in a slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
	int i;

	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = kalloc(sizeof(slab_mag_cache_t) * config.cpu_count, 0);
	for (i = 0; i < config.cpu_count; i++) {
		memsetb((__address) &cache->mag_cache[i],
			sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock,
				    "slab_maglock_cpu");
	}
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   int (*destructor)(void *obj),
		   int flags)
{
	int pages;
	ipl_t ipl;

	memsetb((__address) cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size - 1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);

	/* Grow the slab until the wasted space is acceptable */
	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If the slab info fits into the wasted space, keep it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to the cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}
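
/*
 * Sizing example (illustrative; assumes PAGE_WIDTH == 12, i.e. 4 KiB
 * frames, with fnzb() returning the index of the most significant set
 * bit): for an object size of 5000 bytes,
 * pages == ((5000 - 1) >> 12) + 1 == 2, so the starting order is
 * fnzb(2) == 1, a two-frame slab; the while loop above then raises the
 * order only while the wasted space exceeds SLAB_MAX_BADNESS(cache).
 */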

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 int (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to the original magazine count to avoid
	 * an endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Free CPU-bound magazines */
		/* Destroy CPU magazines */
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs and remove cache from the system */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove the cache from the cache list, so that we don't
	 * need to disable interrupts later
	 */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		kfree(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags are given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		result = magazine_obj_get(cache);
	}
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
	    || magazine_obj_put(cache, obj)) {

		slab_obj_destroy(cache, obj, slab);

	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add an assert that interrupts are disabled, otherwise
	 * memory allocation from interrupts can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t) + SLAB_MAG_SIZE*sizeof(void *),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. It allocates the slab for the cpucache and enables it on
 * all existing slab caches that are SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
	link_t *cur;
	slab_cache_t *s;

#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		s = list_get_instance(cur, slab_cache_t, link);
		if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
			continue;
		make_magcache(s);
		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* kalloc/kfree functions */
void * kalloc(unsigned int size, int flags)
{
	int idx;

	ASSERT(_slab_initialized);
	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	/* Index of the smallest power-of-2 cache that can hold 'size' bytes */
	idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}
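
/*
 * Index example (illustrative; assumes SLAB_MIN_MALLOC_W == 4, matching
 * the smallest "malloc-16" cache above): for size == 100, fnzb(99) == 6
 * (the highest set bit of 0b1100011), so idx == 6 - 4 + 1 == 3 and the
 * request is served from malloc_caches[3], i.e. "malloc-128", the
 * smallest power-of-two size that can hold 100 bytes.
 */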

void kfree(void *obj)
{
	slab_t *slab;

	if (!obj)
		return;

	slab = obj2slab(obj);
	_slab_free(slab->cache, obj, slab);
}