Diff: Rev 1248 → Rev 1288
@@ -173,11 +173,11 @@
 		}
 	} else {
 		fsize = (PAGE_SIZE << cache->order);
 		slab = data + fsize - sizeof(*slab);
 	}
 
 	/* Fill in slab structures */
 	for (i=0; i < (1 << cache->order); i++)
 		frame_set_parent(pfn+i, slab, zone);
 
 	slab->start = data;
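Line 176 in this hunk places the slab control structure at the very end of the frame block when the cache keeps its bookkeeping on-slab, so only the tail bytes are lost to metadata. Below is a minimal userspace sketch of that placement arithmetic; the simplified slab_t and the use of aligned_alloc() in place of the kernel's frame allocator are assumptions made purely for illustration.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* Simplified stand-in for the kernel's slab_t. */
typedef struct slab {
	void *start;      /* first byte usable for objects */
	size_t available; /* free objects remaining (simplified) */
} slab_t;

int main(void)
{
	unsigned int order = 1;                       /* block of 2^order frames */
	size_t fsize = PAGE_SIZE << order;            /* total block size, as on line 175 */
	void *data = aligned_alloc(PAGE_SIZE, fsize); /* stands in for frame_alloc() */

	/* Same arithmetic as line 176: the control structure occupies the
	 * tail of the block, so objects may only use the bytes before it. */
	slab_t *slab = (slab_t *) ((char *) data + fsize - sizeof(*slab));
	slab->start = data;

	printf("block %p..%p, slab_t at %p, %zu bytes left for objects\n",
	    data, (void *) ((char *) data + fsize), (void *) slab,
	    fsize - sizeof(*slab));
	free(data);
	return 0;
}

The frame_set_parent() loop that follows records this slab as the owner of every frame in the block, which is what later lets free() walk from an object's address back to its slab.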
@@ -275,12 +275,12 @@
 	spinlock_lock(&cache->slablock);
 
 	if (list_empty(&cache->partial_slabs)) {
 		/* Allow recursion and reclaiming
 		 * - this should work, as the slab control structures
-		 * are small and do not need to allocte with anything
-		 * other ten frame_alloc when they are allocating,
+		 * are small and do not need to allocate with anything
+		 * other than frame_alloc when they are allocating,
 		 * that's why we should get recursion at most 1-level deep
 		 */
 		spinlock_unlock(&cache->slablock);
 		slab = slab_space_alloc(cache, flags);
 		if (!slab)
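The comment being corrected in this hunk carries the key reasoning: the cache lock must be dropped before slab_space_alloc(), because growing the cache can recurse one level back into the allocator (to obtain an external slab_t), and holding the spinlock across that recursion would self-deadlock. A toy userspace illustration of the lock-drop pattern follows, using a pthread mutex as a stand-in for the kernel spinlock; all function names here are invented for the sketch.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t slablock = PTHREAD_MUTEX_INITIALIZER;

static void cache_grow(int level);

/* Stands in for slab_space_alloc(): allocating slab space may need a
 * slab_t from another cache, which re-enters cache_grow() exactly once,
 * mimicking the one-level recursion the comment argues for. */
static void slab_space_alloc_stub(int level)
{
	if (level == 0)
		cache_grow(level + 1);
}

static void cache_grow(int level)
{
	pthread_mutex_lock(&slablock);
	/* ... no partial slab found ... */
	pthread_mutex_unlock(&slablock); /* must drop before the call below */
	slab_space_alloc_stub(level);    /* may re-enter cache_grow() */
	pthread_mutex_lock(&slablock);
	/* ... link the freshly grown slab into the cache ... */
	pthread_mutex_unlock(&slablock);
}

int main(void)
{
	cache_grow(0);
	puts("completed without self-deadlock");
	return 0;
}

If the unlock before slab_space_alloc_stub() were removed, the recursive call would try to take a lock already held by the same thread, which is exactly the situation lines 284-285 avoid.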
@@ -877,21 +877,20 @@
 void * malloc(unsigned int size, int flags)
 {
 	int idx;
 
 	ASSERT(_slab_initialized);
-	ASSERT( size && size <= (1 << SLAB_MAX_MALLOC_W));
+	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));
 
 	if (size < (1 << SLAB_MIN_MALLOC_W))
 		size = (1 << SLAB_MIN_MALLOC_W);
 
 	idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;
 
 	return slab_alloc(malloc_caches[idx], flags);
 }
 
-
 void free(void *obj)
 {
 	slab_t *slab;
 
 	if (!obj) return;
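The index arithmetic in malloc() above maps a request to the smallest power-of-two cache that fits it: fnzb() returns the index of the most significant set bit, so fnzb(size - 1) + 1 is the ceiling of log2(size). The standalone sketch below reproduces the same computation; fnzb() is re-implemented portably, and SLAB_MIN_MALLOC_W = 4 (a 16-byte minimum class) is an assumed value chosen only for illustration.

#include <assert.h>
#include <stdio.h>

#define SLAB_MIN_MALLOC_W 4 /* assumed for this sketch */

/* Portable stand-in for the kernel's fnzb(): index of the most
 * significant set bit, e.g. fnzb(99) == 6. */
static int fnzb(unsigned int n)
{
	int i = -1;
	while (n) {
		n >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	for (unsigned int size = 17; size <= 128; size += 37) {
		unsigned int sz = size;
		/* Clamp to the minimum class, as on lines 884-885. */
		if (sz < (1 << SLAB_MIN_MALLOC_W))
			sz = (1 << SLAB_MIN_MALLOC_W);
		/* Same formula as line 887. */
		int idx = fnzb(sz - 1) - SLAB_MIN_MALLOC_W + 1;
		unsigned int obj_size = 1u << (SLAB_MIN_MALLOC_W + idx);
		/* The chosen class is the smallest power of two >= size. */
		assert(obj_size >= size && obj_size / 2 < size);
		printf("size %3u -> cache index %d (%u-byte objects)\n",
		    size, idx, obj_size);
	}
	return 0;
}

For example, a 100-byte request gives fnzb(99) = 6, so idx = 6 - 4 + 1 = 3, selecting the 128-byte cache in malloc_caches[], the smallest class that can hold it.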