Rev 775 → Rev 778  (lines marked '-' appear only in rev 775, lines marked '+' only in rev 778)
Line 83... | Line 83...
  * from non-cpu-cached mag. cache). This would provide a nice per-cpu
  * buffer. The other possibility is to use the per-cache
  * 'empty-magazine-list', which decreases competing for 1 per-system
  * magazine cache.
  *
+ * - it might be good to add granularity of locks even to slab level,
+ *   we could then try_spinlock over all partial slabs and thus improve
+ *   scalability even on slab level
  */


 #include <synch/spinlock.h>
 #include <mm/slab.h>
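The three comment lines added in this hunk suggest pushing lock granularity down to individual slabs and probing them with a try-lock so allocation can skip contended slabs. Below is a minimal user-space sketch of that idea, using pthreads in place of the kernel spinlock API; toy_slab_t and toy_cache_t are invented stand-ins, not the real slab_t and slab_cache_t.

    /* Illustration only: the per-slab try-lock sweep suggested above.
     * Types and names here are invented stand-ins, not HelenOS API. */
    #include <pthread.h>
    #include <stddef.h>

    typedef struct toy_slab {
        struct toy_slab *next;      /* next slab on the partial list */
        pthread_mutex_t lock;       /* the proposed per-slab lock */
        int available;              /* free objects left in this slab */
        void *freelist;             /* head of this slab's free-object list */
    } toy_slab_t;

    typedef struct {
        pthread_mutex_t listlock;   /* still needed to traverse the list itself */
        toy_slab_t *partial;        /* partial slabs */
    } toy_cache_t;

    /* Probe each partial slab with a try-lock; skip busy slabs instead of
     * spinning on them. */
    static void *toy_obj_create(toy_cache_t *cache)
    {
        void *obj = NULL;

        pthread_mutex_lock(&cache->listlock);
        for (toy_slab_t *s = cache->partial; s && !obj; s = s->next) {
            if (pthread_mutex_trylock(&s->lock) != 0)
                continue;                    /* contended: try the next slab */
            if (s->available > 0) {
                obj = s->freelist;           /* pop one free object */
                s->freelist = *(void **) obj;
                s->available--;
            }
            pthread_mutex_unlock(&s->lock);
        }
        pthread_mutex_unlock(&cache->listlock);
        return obj;
    }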
Line 216... | Line 219...


 /**
  * Return object to slab and call a destructor
  *
- * Assume the cache->lock is held;
- *
  * @param slab If the caller knows directly slab of the object, otherwise NULL
  *
  * @return Number of freed pages
  */
 static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
Line 232... | Line 233...
         if (!slab)
                 slab = obj2slab(obj);

         ASSERT(slab->cache == cache);

+        spinlock_lock(&cache->slablock);
+
         *((int *)obj) = slab->nextavail;
         slab->nextavail = (obj - slab->start)/cache->size;
         slab->available++;

         /* Move it to correct list */
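The three statements above implement the slab's internal free list: each free object's first word stores the index of the next free object, and slab->nextavail holds the head index. The toy program below demonstrates the same encoding in isolation; the -1 sentinel is an artifact of the toy, since the real code bounds its walks with slab->available instead.

    /* Toy demonstration of the index-based free list used above. */
    #include <stdio.h>

    #define OBJ_SIZE  32
    #define OBJ_COUNT 4

    int main(void)
    {
        char area[OBJ_SIZE * OBJ_COUNT];    /* plays the role of slab->start */
        int nextavail = -1;                 /* toy sentinel for "list empty" */

        /* Free objects 2 and then 0, linked the way slab_obj_destroy() does it. */
        int freed[] = { 2, 0 };
        for (int k = 0; k < 2; k++) {
            void *obj = area + freed[k] * OBJ_SIZE;
            *((int *) obj) = nextavail;                         /* link to old head */
            nextavail = (int) (((char *) obj - area) / OBJ_SIZE);   /* new head index */
        }

        /* Walk the list: prints index 0, then index 2. */
        for (int i = nextavail; i != -1; i = *((int *) (area + i * OBJ_SIZE)))
            printf("free object at index %d\n", i);
        return 0;
    }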
Line 245... | Line 248...
                 list_prepend(&slab->link, &cache->partial_slabs);
         }
         if (slab->available == cache->objects) {
                 /* Free associated memory */
                 list_remove(&slab->link);
-                /* Avoid deadlock */
-                spinlock_unlock(&cache->lock);
+                /* This should not produce deadlock, as
+                 * magazine is always allocated with NO reclaim,
+                 * keep all locks */
                 frames = slab_space_free(cache, slab);
-                spinlock_lock(&cache->lock);
         }

+        spinlock_unlock(&cache->slablock);
+
         return frames;
 }

 /**
  * Take new object from slab or create new if needed
  *
- * Assume cache->lock is held.
- *
  * @return Object address or null
  */
 static void * slab_obj_create(slab_cache_t *cache, int flags)
 {
         slab_t *slab;
         void *obj;

+        spinlock_lock(&cache->slablock);
+
         if (list_empty(&cache->partial_slabs)) {
                 /* Allow recursion and reclaiming
                  * - this should work, as the SLAB control structures
                  *   are small and do not need to allocate with anything
                  *   other than frame_alloc when they are allocating,
                  *   that's why we should get recursion at most 1-level deep
                  */
-                spinlock_unlock(&cache->lock);
+                spinlock_unlock(&cache->slablock);
                 slab = slab_space_alloc(cache, flags);
-                spinlock_lock(&cache->lock);
-                if (!slab) {
-                        return NULL;
-                }
+                spinlock_lock(&cache->slablock);
+                if (!slab)
+                        goto err;
         } else {
                 slab = list_get_instance(cache->partial_slabs.next,
                                          slab_t,
                                          link);
                 list_remove(&slab->link);
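The unlock/alloc/relock around slab_space_alloc() above is the usual pattern for dropping a lock across a call that may block or re-enter the allocator. A generic sketch of its shape follows; all names are placeholders, not kernel calls, and the point is only that state read before the unlock must be treated as stale afterwards.

    /* Generic shape only; the pool_* and refill_* names are placeholders. */
    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static int pool_count;                  /* items currently in the pool */

    /* Stand-in for the refill step: may block, and may call pool_get() itself. */
    static void *refill_allocate(void)
    {
        return malloc(64);
    }

    void *pool_get(void)
    {
        void *item = NULL;

        pthread_mutex_lock(&pool_lock);
        if (pool_count == 0) {
            pthread_mutex_unlock(&pool_lock);   /* drop the lock: the call below   */
            item = refill_allocate();           /* may block or re-enter pool_get()*/
            pthread_mutex_lock(&pool_lock);     /* re-taken: anything read before  */
                                                /* the unlock is stale now         */
        }
        pthread_mutex_unlock(&pool_lock);
        return item;
    }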
Line 292... | Line 296...
         slab->available--;
         if (! slab->available)
                 list_prepend(&slab->link, &cache->full_slabs);
         else
                 list_prepend(&slab->link, &cache->partial_slabs);
+
+        spinlock_unlock(&cache->slablock);
         return obj;
+err:
+        spinlock_unlock(&cache->slablock);
+        return NULL;
 }

 /**************************************/
 /* CPU-Cache slab functions */

 /**
  * Free all objects in magazine and free memory associated with magazine
  *
- * Assume cache->lock is held
- *
  * @return Number of freed pages
  */
 static count_t magazine_destroy(slab_cache_t *cache,
                                 slab_magazine_t *mag)
 {
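The err: label added to slab_obj_create() funnels the failure path through the same unlock as the success path, so slablock is released exactly once however the function exits. The same idiom in miniature, with placeholder names:

    /* Single-exit unlock idiom, reduced to its essentials (names are placeholders). */
    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    void *get_item(void)
    {
        void *item;

        pthread_mutex_lock(&lock);
        item = malloc(64);              /* stand-in for the real work under the lock */
        if (!item)
            goto err;
        /* ... further steps that may also 'goto err' on failure ... */
        pthread_mutex_unlock(&lock);
        return item;
    err:
        pthread_mutex_unlock(&lock);    /* every failure path unlocks exactly once */
        return NULL;
    }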
Line 343... | Line 350...
                         cache->mag_cache[CPU->id].last = cmag;
                         return lastmag;
                 }
         }
         /* Local magazines are empty, import one from magazine list */
-        spinlock_lock(&cache->lock);
+        spinlock_lock(&cache->maglock);
         if (list_empty(&cache->magazines)) {
-                spinlock_unlock(&cache->lock);
+                spinlock_unlock(&cache->maglock);
                 return NULL;
         }
         newmag = list_get_instance(cache->magazines.next,
                                    slab_magazine_t,
                                    link);
         list_remove(&newmag->link);
-        spinlock_unlock(&cache->lock);
+        spinlock_unlock(&cache->maglock);

         if (lastmag)
                 slab_free(&mag_cache, lastmag);
         cache->mag_cache[CPU->id].last = cmag;
         cache->mag_cache[CPU->id].current = newmag;
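Two locks now meet on this path: the caller already holds this CPU's mag_cache lock, and the shared magazine list is guarded by the new maglock. The summary below is the lock ordering these call sites imply; it is derived from the diff rather than stated in the source, so treat it as an editor's reading, not documented policy.

    /*
     * Lock ordering implied by the call sites in this revision:
     *
     *   cache->mag_cache[cpu].lock  ->  cache->maglock  ->  cache->slablock
     *
     * - the per-CPU refill and flush paths take the per-CPU lock, then maglock;
     * - _slab_reclaim() takes every per-CPU lock, then maglock, and reaches
     *   slablock through magazine_destroy() -> slab_obj_destroy();
     * - slab_obj_create() and slab_obj_destroy() take slablock innermost and
     *   take none of the other cache locks while holding it.
     *
     * Keeping every path to this one order is what lets maglock and slablock
     * replace the old single cache->lock without introducing deadlocks.
     */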
Line 429... | Line 436...
         newmag->size = SLAB_MAG_SIZE;
         newmag->busy = 0;

         /* Flush last to magazine list */
         if (lastmag) {
-                spinlock_lock(&cache->lock);
+                spinlock_lock(&cache->maglock);
                 list_prepend(&lastmag->link, &cache->magazines);
-                spinlock_unlock(&cache->lock);
+                spinlock_unlock(&cache->maglock);
         }
         /* Move current as last, save new as current */
         cache->mag_cache[CPU->id].last = cmag;
         cache->mag_cache[CPU->id].current = newmag;

Line 522... | Line 529...
         cache->flags = flags;

         list_initialize(&cache->full_slabs);
         list_initialize(&cache->partial_slabs);
         list_initialize(&cache->magazines);
-        spinlock_initialize(&cache->lock, "cachelock");
+        spinlock_initialize(&cache->slablock, "slab_lock");
+        spinlock_initialize(&cache->maglock, "slab_maglock");
         if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
                 for (i=0; i < config.cpu_count; i++) {
                         memsetb((__address)&cache->mag_cache[i],
                                 sizeof(cache->mag_cache[i]), 0);
                         spinlock_initialize(&cache->mag_cache[i].lock,
-                                            "cpucachelock");
+                                            "slab_maglock_cpu");
                 }
         }

         /* Compute slab sizes, object counts in slabs etc. */
         if (cache->size < SLAB_INSIDE_SIZE)
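The initialization above splits the old cache->lock into slablock and maglock, while the per-CPU magazine locks keep their old role and only get a new debug name. The sketch below maps each lock to the data it appears to guard; the stand-in typedefs exist only so the fragment compiles on its own, and any naming or ordering beyond what the diff shows is a guess, not the real slab_cache_t layout.

    /* Stand-in types so this sketch is self-contained; the diffed code uses
     * the kernel's real spinlock_t, link_t and slab_magazine_t. */
    typedef int spinlock_t;
    typedef struct link { struct link *prev, *next; } link_t;
    typedef struct slab_magazine slab_magazine_t;
    #define MAX_CPUS 8      /* placeholder: the kernel sizes this from config.cpu_count */

    typedef struct {
        /* slablock ("slab_lock"): the slab lists and per-slab free counts */
        spinlock_t slablock;
        link_t full_slabs;
        link_t partial_slabs;

        /* maglock ("slab_maglock"): the shared list of full magazines */
        spinlock_t maglock;
        link_t magazines;

        /* one lock per CPU ("slab_maglock_cpu"): that CPU's two magazine
         * pointers, so the common alloc/free path avoids the shared locks */
        struct {
            spinlock_t lock;
            slab_magazine_t *current;
            slab_magazine_t *last;
        } mag_cache[MAX_CPUS];
    } slab_cache_lock_map_t;    /* illustrative name, not the kernel's type */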
Line 592... | Line 600...
         /* First lock all cpu caches, then the complete cache lock */
         if (flags & SLAB_RECLAIM_ALL) {
                 for (i=0; i < config.cpu_count; i++)
                         spinlock_lock(&cache->mag_cache[i].lock);
         }
-        spinlock_lock(&cache->lock);
+        spinlock_lock(&cache->maglock);

         if (flags & SLAB_RECLAIM_ALL) {
                 /* Aggressive memfree */
                 /* Destroy CPU magazines */
                 for (i=0; i<config.cpu_count; i++) {
Line 609... | Line 617...
                         if (mag)
                                 frames += magazine_destroy(cache, mag);
                         cache->mag_cache[i].last = NULL;
                 }
         }
+        /* We can release the cache locks now */
+        if (flags & SLAB_RECLAIM_ALL) {
+                for (i=0; i < config.cpu_count; i++)
+                        spinlock_unlock(&cache->mag_cache[i].lock);
+        }
         /* Destroy full magazines */
         cur=cache->magazines.prev;

         while (cur != &cache->magazines) {
                 mag = list_get_instance(cur, slab_magazine_t, link);
Line 624... | Line 637...
                  * as soon as something is freed */
                 if (!(flags & SLAB_RECLAIM_ALL) && frames)
                         break;
         }

-        spinlock_unlock(&cache->lock);
-        /* We can release the cache locks now */
-        if (flags & SLAB_RECLAIM_ALL) {
-                for (i=0; i < config.cpu_count; i++)
-                        spinlock_unlock(&cache->mag_cache[i].lock);
-        }
+        spinlock_unlock(&cache->maglock);

         return frames;
 }

 /** Check that there are no slabs and remove cache from system */
Line 668... | Line 676...
         ipl = interrupts_disable();

         if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
                 result = magazine_obj_get(cache);

-        if (!result) {
-                spinlock_lock(&cache->lock);
+        if (!result)
                 result = slab_obj_create(cache, flags);
-                spinlock_unlock(&cache->lock);
-        }

         interrupts_restore(ipl);

         if (result)
                 atomic_inc(&cache->allocated_objs);
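With slab_obj_create() doing its own locking, the allocation path above no longer takes a cache-wide lock at all. From the caller's side nothing changes; a minimal usage sketch follows, where the cache is assumed to have been created elsewhere with slab_cache_create() and the 0 flags value is an assumption rather than something shown in this diff.

    /* Caller-side sketch only; my_cache and the flag value are assumptions. */
    extern slab_cache_t *my_cache;      /* set up elsewhere via slab_cache_create() */

    void *grab_buffer(void)
    {
        /* No external locking required: interrupt state and all cache locks
         * are handled inside slab_alloc() itself. */
        return slab_alloc(my_cache, 0);
    }

    void release_buffer(void *obj)
    {
        if (obj)
            slab_free(my_cache, obj);
    }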
Line 691... | Line 696...

         ipl = interrupts_disable();

         if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
             || magazine_obj_put(cache, obj)) {
-                spinlock_lock(&cache->lock);
+
                 slab_obj_destroy(cache, obj, slab);
-                spinlock_unlock(&cache->lock);
+
         }
         interrupts_restore(ipl);
         atomic_dec(&cache->allocated_objs);
 }

Line 714... | Line 719...
         link_t *cur;
         count_t frames = 0;

         spinlock_lock(&slab_cache_lock);

+        /* TODO: Add an assert that interrupts are disabled, otherwise
+         * memory allocation from interrupts can deadlock.
+         */
+
         for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
                 cache = list_get_instance(cur, slab_cache_t, link);
                 frames += _slab_reclaim(cache, flags);
         }

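The new TODO names the hazard only briefly. Spelled out, the deadlock it worries about looks like the toy trace below; the ASSERT line is just one hypothetical way to realize the TODO, and the predicate name is assumed, not taken from this code.

    /* Why the TODO matters, as a toy single-CPU trace:
     *
     *   slab_reclaim()        takes cache->maglock, interrupts still enabled
     *   <interrupt arrives>   the handler allocates and calls slab_alloc()
     *   slab_alloc()          needs the same maglock and spins forever; the
     *                         lock's owner is the interrupted code, which
     *                         can never run again on this CPU
     *
     * A hypothetical way to realize the TODO (the predicate name is assumed):
     *
     *   ASSERT(interrupts_disabled());
     */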