Rev 781 → Rev 782
@@ -226,12 +226,10 @@
  * @return Number of freed pages
  */
 static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                 slab_t *slab)
 {
-        count_t frames = 0;
-
         if (!slab)
                 slab = obj2slab(obj);
 
         ASSERT(slab->cache == cache);
         ASSERT(slab->available < cache->objects);
@@ -244,23 +242,21 @@
 
         /* Move it to correct list */
         if (slab->available == cache->objects) {
                 /* Free associated memory */
                 list_remove(&slab->link);
-                /* This should not produce deadlock, as
-                 * magazine is always allocated with NO reclaim,
-                 * keep all locks */
-                frames = slab_space_free(cache, slab);
+                spinlock_unlock(&cache->slablock);
+
+                return slab_space_free(cache, slab);
+
         } else if (slab->available == 1) {
                 /* It was in full, move to partial */
                 list_remove(&slab->link);
                 list_prepend(&slab->link, &cache->partial_slabs);
+                spinlock_unlock(&cache->slablock);
         }
-
-        spinlock_unlock(&cache->slablock);
-
-        return frames;
+        return 0;
 }
 
 /**
  * Take new object from slab or create new if needed
  *
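To make the change easier to follow, the sketch below reconstructs slab_obj_destroy() as it reads on the Rev 782 side of the two hunks above. The middle of the function is not part of this diff and appears only as a placeholder comment; the helpers obj2slab() and slab_space_free() and the point where cache->slablock is taken also lie outside the shown hunks, so their use here is inferred from context rather than quoted from the file.

static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
        if (!slab)
                slab = obj2slab(obj);

        ASSERT(slab->cache == cache);
        ASSERT(slab->available < cache->objects);

        /*
         * Elided by this diff (new lines 236-241): return the object to the
         * slab's free list under cache->slablock. That the lock is held at
         * this point is an assumption inferred from the unlock calls below.
         */

        /* Move it to correct list */
        if (slab->available == cache->objects) {
                /* Free associated memory */
                list_remove(&slab->link);
                /*
                 * Unlock first, so slab_space_free() no longer runs while
                 * cache->slablock is held (the old comment arguing for
                 * keeping the lock is removed by this revision).
                 */
                spinlock_unlock(&cache->slablock);

                return slab_space_free(cache, slab);

        } else if (slab->available == 1) {
                /* It was in full, move to partial */
                list_remove(&slab->link);
                list_prepend(&slab->link, &cache->partial_slabs);
                spinlock_unlock(&cache->slablock);
        }
        return 0;
}

The visible effect of the revision: the local frames counter and the single trailing unlock are gone, each branch shown now drops cache->slablock itself, and the empty-slab path returns the count from slab_space_free() directly instead of funnelling it through frames.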
@@ -742,11 +738,10 @@
 
         spinlock_lock(&slab_cache_lock);
 
         /* TODO: Add assert, that interrupts are disabled, otherwise
          * memory allocation from interrupts can deadlock.
-         * - cache_destroy can call this with interrupts enabled :-/
          */
 
         for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
                 cache = list_get_instance(cur, slab_cache_t, link);
                 frames += _slab_reclaim(cache, flags);
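The TODO kept in this hunk reasons that the walk over slab_cache_list must run with interrupts disabled, since an allocation attempted from interrupt context could otherwise deadlock on slab_cache_lock; only the side note about cache_destroy() calling in with interrupts enabled is dropped by the revision. A minimal sketch of the assertion the TODO asks for is given below; interrupts_disabled() is a hypothetical helper standing in for whatever primitive the kernel actually offers to query the interrupt state, so this is an illustration of the idea, not the project's API.

/*
 * Hypothetical sketch only: refuse to take slab_cache_lock unless the
 * caller has already disabled interrupts, so an allocation made from an
 * interrupt handler cannot spin on a lock held by this reclaim pass.
 */
ASSERT(interrupts_disabled());          /* interrupts_disabled() is assumed */

spinlock_lock(&slab_cache_lock);

Placed before spinlock_lock(&slab_cache_lock), such a check would also catch the cache_destroy() path that the removed note complained about.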