Rev 2124 → Rev 2745: this revision converts loop counters and size arithmetic in the kernel slab allocator from signed `int` to `unsigned int` and normalizes spacing.
```diff
@@ -170,11 +170,11 @@
 static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
 {
     void *data;
     slab_t *slab;
     size_t fsize;
-    int i;
+    unsigned int i;
     unsigned int zone = 0;
 
     data = frame_alloc_generic(cache->order, FRAME_KA | flags, &zone);
     if (!data) {
         return NULL;
@@ -189,19 +189,19 @@
         fsize = (PAGE_SIZE << cache->order);
         slab = data + fsize - sizeof(*slab);
     }
 
     /* Fill in slab structures */
-    for (i=0; i < (1 << cache->order); i++)
-        frame_set_parent(ADDR2PFN(KA2PA(data))+i, slab, zone);
+    for (i = 0; i < ((unsigned int) 1 << cache->order); i++)
+        frame_set_parent(ADDR2PFN(KA2PA(data)) + i, slab, zone);
 
     slab->start = data;
     slab->available = cache->objects;
     slab->nextavail = 0;
     slab->cache = cache;
 
-    for (i=0; i<cache->objects;i++)
+    for (i = 0; i < cache->objects; i++)
         *((int *) (slab->start + i*cache->size)) = i+1;
 
     atomic_inc(&cache->allocated_slabs);
     return slab;
 }
```
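The last loop in `slab_space_alloc()` threads an implicit free list through the fresh slab: each free object's first word stores the index of the next free object, and `slab->nextavail` holds the head index (0 after this function). A minimal sketch of how an allocation would pop that chain (the helper name and control flow are assumptions for illustration, not this file's actual allocator):

```c
/* Minimal sketch, assuming the index chain built by slab_space_alloc():
 * a free object's first int names its successor. Illustrative only. */
static void *obj_pop(slab_cache_t *cache, slab_t *slab)
{
    if (!slab->available)
        return NULL;                 /* no free objects left in this slab */

    /* nextavail is an object index, so scale it by the object size. */
    void *obj = slab->start + slab->nextavail * cache->size;

    /* The first word of a free object holds the next free index. */
    slab->nextavail = *((int *) obj);
    slab->available--;

    return obj;
}
```

Keeping the indices inside the free objects themselves means an empty slab needs no per-object metadata; the whole free list costs one `int` of each object's own storage.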
```diff
@@ -369,14 +369,14 @@
  * @return Number of freed pages
  */
 static count_t magazine_destroy(slab_cache_t *cache,
                                 slab_magazine_t *mag)
 {
-    int i;
+    unsigned int i;
     count_t frames = 0;
 
-    for (i=0;i < mag->busy; i++) {
+    for (i = 0; i < mag->busy; i++) {
         frames += slab_obj_destroy(cache, mag->objs[i], NULL);
         atomic_dec(&cache->cached_objs);
     }
 
     slab_free(&mag_cache, mag);
```
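`magazine_destroy()` drains every object cached in a full magazine back through `slab_obj_destroy()`, then frees the magazine itself via the dedicated `mag_cache`. The loop treats a magazine as a counted array of object pointers, roughly this shape (only `busy` and `objs[]` appear in the code above; the `link` and `size` members are assumptions for illustration):

```c
/* Sketch of the magazine layout implied by magazine_destroy():
 * `busy` counts the filled slots and `objs[]` holds the cached
 * objects. The other members are assumptions, not from this file. */
typedef struct {
    link_t link;      /* assumed: linkage in the cache's magazine list */
    count_t busy;     /* number of objects currently stored */
    count_t size;     /* assumed: capacity of objs[] */
    void *objs[0];    /* cached object pointers, drained by the loop above */
} slab_magazine_t;
```

Because a magazine only stores pointers, draining it is a simple linear walk; the expensive per-slab bookkeeping happens inside `slab_obj_destroy()`.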
```diff
@@ -525,25 +525,25 @@
 
 /**************************************/
 /* Slab cache functions */
 
 /** Return number of objects that fit in certain cache size */
-static int comp_objects(slab_cache_t *cache)
+static unsigned int comp_objects(slab_cache_t *cache)
 {
     if (cache->flags & SLAB_CACHE_SLINSIDE)
         return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
     else
         return (PAGE_SIZE << cache->order) / cache->size;
 }
 
 /** Return wasted space in slab */
-static int badness(slab_cache_t *cache)
+static unsigned int badness(slab_cache_t *cache)
 {
-    int objects;
-    int ssize;
+    unsigned int objects;
+    unsigned int ssize;
 
     objects = comp_objects(cache);
     ssize = PAGE_SIZE << cache->order;
     if (cache->flags & SLAB_CACHE_SLINSIDE)
         ssize -= sizeof(slab_t);
-    return ssize - objects*cache->size;
+    return ssize - objects * cache->size;
 }
```
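To make the arithmetic in `comp_objects()` and `badness()` concrete, here is a worked example with illustrative values (a 4 KiB page, a 96-byte object, and a 64-byte `slab_t` are assumptions, not values from the source):

```c
/* Worked example with assumed values: PAGE_SIZE = 4096,
 * cache->order = 0, cache->size = 96, sizeof(slab_t) = 64. */

/* External slab header (no SLAB_CACHE_SLINSIDE):
 *   comp_objects = 4096 / 96             = 42 objects
 *   badness      = 4096 - 42 * 96        = 64 wasted bytes */

/* Embedded slab header (SLAB_CACHE_SLINSIDE):
 *   comp_objects = (4096 - 64) / 96      = 42 objects
 *   badness      = (4096 - 64) - 42 * 96 = 0 wasted bytes */
```

Since both functions only ever produce non-negative counts of objects and bytes, the switch to `unsigned int` return types matches what the math can actually yield.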
```diff
@@ -550,21 +550,20 @@
 
 /**
  * Initialize mag_cache structure in slab cache
  */
 static void make_magcache(slab_cache_t *cache)
 {
-    int i;
+    unsigned int i;
 
     ASSERT(_slab_initialized >= 2);
 
     cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
-    for (i=0; i < config.cpu_count; i++) {
+    for (i = 0; i < config.cpu_count; i++) {
         memsetb((uintptr_t)&cache->mag_cache[i],
             sizeof(cache->mag_cache[i]), 0);
-        spinlock_initialize(&cache->mag_cache[i].lock,
-                    "slab_maglock_cpu");
+        spinlock_initialize(&cache->mag_cache[i].lock, "slab_maglock_cpu");
     }
 }
 
 /** Initialize allocated memory as a slab cache */
 static void
```
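`make_magcache()` gives each CPU a private, spinlock-protected magazine slot so the hot allocation path can avoid the cache-wide lock. The per-CPU record it zeroes and lock-initializes plausibly looks like this (`current` is evidenced by its use in `_slab_reclaim()` below; `last` and the exact declaration style are assumptions):

```c
/* Rough shape of one per-CPU entry in cache->mag_cache[]; only
 * `current` and `lock` are evidenced by the surrounding code. */
typedef struct {
    slab_magazine_t *current;  /* magazine being filled/drained by this CPU */
    slab_magazine_t *last;     /* assumed: previously active magazine */
    SPINLOCK_DECLARE(lock);    /* the "slab_maglock_cpu" lock set up above */
} slab_mag_cache_t;
```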
```diff
@@ -652,11 +651,11 @@
  * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
  * @return Number of freed pages
  */
 static count_t _slab_reclaim(slab_cache_t *cache, int flags)
 {
-    int i;
+    unsigned int i;
     slab_magazine_t *mag;
     count_t frames = 0;
     int magcount;
 
     if (cache->flags & SLAB_CACHE_NOMAGAZINE)
@@ -673,11 +672,11 @@
     }
 
     if (flags & SLAB_RECLAIM_ALL) {
         /* Free cpu-bound magazines */
         /* Destroy CPU magazines */
-        for (i=0; i<config.cpu_count; i++) {
+        for (i = 0; i < config.cpu_count; i++) {
             spinlock_lock(&cache->mag_cache[i].lock);
 
             mag = cache->mag_cache[i].current;
             if (mag)
                 frames += magazine_destroy(cache, mag);
```
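The `SLAB_RECLAIM_ALL` branch shows the per-CPU teardown pattern: take each CPU's magazine lock, detach the current magazine, and add the frames `magazine_destroy()` reports. A hedged sketch of a system-wide reclaim driver built on `_slab_reclaim()` (the global list, its lock, and the outer function name are assumptions; only `_slab_reclaim()` itself comes from this diff):

```c
/* Illustrative system-wide reclaim, assuming the caches are kept on
 * a global list protected by a spinlock. Every name except
 * _slab_reclaim() is an assumption made for this sketch. */
static count_t slab_reclaim_all(int flags)
{
    link_t *cur;
    slab_cache_t *cache;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);
    /* Walk every registered cache and let it release what it can. */
    for (cur = slab_cache_list.next; cur != &slab_cache_list;
        cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }
    spinlock_unlock(&slab_cache_lock);

    return frames;
}
```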