/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * - empty SLABs are deallocated immediately
 *   (in Linux they are kept in a linked list, in Solaris ???)
 * - empty magazines are deallocated when not needed
 *   (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * - cache coloring
 * - dynamic magazine growing (different magazine sizes are already
 *   supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first looked up in the
 * CPU-bound magazine. If it is not found there, it is allocated from the
 * CPU-shared SLAB - if a partially full slab is found, it is used,
 * otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound
 * magazine. If there is no such magazine, a new one is allocated (if the
 * allocation fails, the object is deallocated directly into the SLAB).
 * If the magazine is full, it is put onto the CPU-shared list of magazines
 * and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines, to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty SLABs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The SLAB information structure is kept inside the data area, if possible.
 * A cache can be marked that it should not use magazines. This is used
 * only for SLAB related caches, to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 */

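/*
 * Typical use, as a minimal sketch (the object type example_t and the cache
 * name below are hypothetical, for illustration only):
 *
 *    slab_cache_t *cache;
 *    example_t *obj;
 *
 *    cache = slab_cache_create("example_t", sizeof(example_t), 0,
 *                              NULL, NULL, 0);
 *    obj = slab_alloc(cache, 0);
 *    ...
 *    slab_free(cache, obj);
 *    slab_cache_destroy(cache);
 */
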
#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small': the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] =  {
    "malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
    "malloc-256","malloc-512","malloc-1K","malloc-2K",
    "malloc-4K","malloc-8K","malloc-16K","malloc-32K",
    "malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache;  /**< Pointer to parent cache */
    link_t link;          /**< List of full/partial slabs */
    void *start;          /**< Start address of first available item */
    count_t available;    /**< Count of available items in this slab */
    index_t nextavail;    /**< The index of next available item */
} slab_t;

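/*
 * Layout sketch (an illustration of the SLAB_CACHE_SLINSIDE case, where the
 * descriptor lives at the end of the frame block; see slab_space_alloc()
 * below):
 *
 *   +-------+-------+-- ... --+---------+--------+
 *   | obj 0 | obj 1 |         | obj N-1 | slab_t |
 *   +-------+-------+-- ... --+---------+--------+
 *   ^ start                             ^ data + fsize - sizeof(slab_t)
 */
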
/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    zone_t *zone = NULL;
    int status;
    frame_t *frame;

    data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free((__address)data);
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    /* TODO: some better way of accessing the frame */
    for (i=0; i < (1 << cache->order); i++) {
        frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
        frame->parent = slab;
    }

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

    /* Chain the free objects: the first word of each free object holds
     * the index of the next free object */
    for (i=0; i < cache->objects; i++)
        *((int *) (slab->start + i*cache->size)) = i+1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free((__address)slab->start);
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    frame_t *frame;

    frame = frame_addr2frame((__address)obj);
    return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * Assume the cache->lock is held.
 *
 * @param slab The slab of the object, if known by the caller; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
    count_t frames = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    /* Push the object back onto the slab's embedded free list */
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        /* Avoid deadlock */
        spinlock_unlock(&cache->lock);
        frames = slab_space_free(cache, slab);
        spinlock_lock(&cache->lock);
    }

    return frames;
}

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * Assume cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the SLAB control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why we should get recursion at most 1-level deep
         */
        spinlock_unlock(&cache->lock);
        slab = slab_space_alloc(cache, flags);
        spinlock_lock(&cache->lock);
        if (!slab) {
            return NULL;
        }
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                                 slab_t,
                                 link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;
    if (! slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * Assume mag_cache[cpu].lock is locked
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i=0; i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from magazine list */
    spinlock_lock(&cache->lock);
    if (list_empty(&cache->magazines)) {
        spinlock_unlock(&cache->lock);
        return NULL;
    }
    newmag = list_get_instance(cache->magazines.next,
                               slab_magazine_t,
                               link);
    list_remove(&newmag->link);
    spinlock_unlock(&cache->lock);

    if (lastmag)
        slab_free(&mag_cache, lastmag);
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Ensure that the current magazine has room for another object; return a
 * pointer to it, or NULL if no non-full magazine is available and a new one
 * cannot be allocated
 *
 * We have 2 magazines bound to the processor.
 * First try the current.
 * If it is full, try the last.
 * If both are full, put the last onto the magazines list,
 * allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current | last are full | nonexistent, allocate new */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to magazine list */
    if (lastmag)
        list_prepend(&lastmag->link, &cache->magazines);
    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into a slab of the cache's order */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
470 | /** Initialize allocated memory as a slab cache */ |
479 | /** Initialize allocated memory as a slab cache */ |
471 | static void |
480 | static void |
472 | _slab_cache_create(slab_cache_t *cache, |
481 | _slab_cache_create(slab_cache_t *cache, |
473 | char *name, |
482 | char *name, |
474 | size_t size, |
483 | size_t size, |
475 | size_t align, |
484 | size_t align, |
476 | int (*constructor)(void *obj, int kmflag), |
485 | int (*constructor)(void *obj, int kmflag), |
477 | void (*destructor)(void *obj), |
486 | void (*destructor)(void *obj), |
478 | int flags) |
487 | int flags) |
479 | { |
488 | { |
480 | int i; |
489 | int i; |
- | 490 | int pages; |
|
481 | 491 | ||
482 | memsetb((__address)cache, sizeof(*cache), 0); |
492 | memsetb((__address)cache, sizeof(*cache), 0); |
483 | cache->name = name; |
493 | cache->name = name; |
484 | 494 | ||
485 | if (align < sizeof(__native)) |
495 | if (align < sizeof(__native)) |
486 | align = sizeof(__native); |
496 | align = sizeof(__native); |
487 | size = ALIGN_UP(size, align); |
497 | size = ALIGN_UP(size, align); |
488 | 498 | ||
489 | cache->size = size; |
499 | cache->size = size; |
490 | 500 | ||
491 | cache->constructor = constructor; |
501 | cache->constructor = constructor; |
492 | cache->destructor = destructor; |
502 | cache->destructor = destructor; |
493 | cache->flags = flags; |
503 | cache->flags = flags; |
494 | 504 | ||
495 | list_initialize(&cache->full_slabs); |
505 | list_initialize(&cache->full_slabs); |
496 | list_initialize(&cache->partial_slabs); |
506 | list_initialize(&cache->partial_slabs); |
497 | list_initialize(&cache->magazines); |
507 | list_initialize(&cache->magazines); |
498 | spinlock_initialize(&cache->lock, "cachelock"); |
508 | spinlock_initialize(&cache->lock, "cachelock"); |
499 | if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) { |
509 | if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) { |
500 | for (i=0; i< config.cpu_count; i++) |
510 | for (i=0; i< config.cpu_count; i++) |
501 | spinlock_initialize(&cache->mag_cache[i].lock, |
511 | spinlock_initialize(&cache->mag_cache[i].lock, |
502 | "cpucachelock"); |
512 | "cpucachelock"); |
503 | } |
513 | } |
504 | 514 | ||
505 | /* Compute slab sizes, object counts in slabs etc. */ |
515 | /* Compute slab sizes, object counts in slabs etc. */ |
506 | if (cache->size < SLAB_INSIDE_SIZE) |
516 | if (cache->size < SLAB_INSIDE_SIZE) |
507 | cache->flags |= SLAB_CACHE_SLINSIDE; |
517 | cache->flags |= SLAB_CACHE_SLINSIDE; |
508 | 518 | ||
509 | /* Minimum slab order */ |
519 | /* Minimum slab order */ |
510 | cache->order = (cache->size-1) >> PAGE_WIDTH; |
520 | pages = ((cache->size-1) >> PAGE_WIDTH) + 1; |
- | 521 | cache->order = fnzb(pages); |
|
511 | 522 | ||
512 | while (badness(cache) > SLAB_MAX_BADNESS(cache)) { |
523 | while (badness(cache) > SLAB_MAX_BADNESS(cache)) { |
513 | cache->order += 1; |
524 | cache->order += 1; |
514 | } |
525 | } |
515 | cache->objects = comp_objects(cache); |
526 | cache->objects = comp_objects(cache); |
516 | /* If info fits in, put it inside */ |
527 | /* If info fits in, put it inside */ |
517 | if (badness(cache) > sizeof(slab_t)) |
528 | if (badness(cache) > sizeof(slab_t)) |
518 | cache->flags |= SLAB_CACHE_SLINSIDE; |
529 | cache->flags |= SLAB_CACHE_SLINSIDE; |
519 | 530 | ||
520 | spinlock_lock(&slab_cache_lock); |
531 | spinlock_lock(&slab_cache_lock); |
521 | 532 | ||
522 | list_append(&cache->link, &slab_cache_list); |
533 | list_append(&cache->link, &slab_cache_list); |
523 | 534 | ||
524 | spinlock_unlock(&slab_cache_lock); |
535 | spinlock_unlock(&slab_cache_lock); |
525 | } |
536 | } |
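
/*
 * Order example (a sketch assuming PAGE_WIDTH == 12, i.e. 4 KiB pages):
 * a 5000-byte object gives pages = ((5000-1) >> 12) + 1 == 2, so the
 * starting order is fnzb(2) == 1, an 8 KiB slab; the badness loop above
 * grows the order further only if too large a fraction of the slab would
 * be wasted.
 */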

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 void (*destructor)(void *obj),
                                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
                       flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    link_t *cur;
    count_t frames = 0;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* First lock all cpu caches, then the complete cache lock */
    if (flags & SLAB_RECLAIM_ALL) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_lock(&cache->mag_cache[i].lock);
    }
    spinlock_lock(&cache->lock);

    if (flags & SLAB_RECLAIM_ALL) {
        /* Aggressive memfree */
        /* Destroy CPU magazines */
        for (i=0; i < config.cpu_count; i++) {
            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;
        }
    }
    /* Destroy full magazines */
    cur = cache->magazines.prev;

    while (cur != &cache->magazines) {
        mag = list_get_instance(cur, slab_magazine_t, link);

        cur = cur->prev;
        list_remove(&mag->link);
        frames += magazine_destroy(cache, mag);
        /* If we do not do full reclaim, break
         * as soon as something is freed */
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    spinlock_unlock(&cache->lock);
    if (flags & SLAB_RECLAIM_ALL) {
        for (i=0; i < config.cpu_count; i++)
            spinlock_unlock(&cache->mag_cache[i].lock);
    }

    return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    spinlock_lock(&slab_cache_lock);
    list_remove(&cache->link);
    spinlock_unlock(&slab_cache_lock);

    slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE) && CPU)
        result = magazine_obj_get(cache);

    if (!result) {
        spinlock_lock(&cache->lock);
        result = slab_obj_create(cache, flags);
        spinlock_unlock(&cache->lock);
    }

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
        || !CPU
        || magazine_obj_put(cache, obj)) {

        spinlock_lock(&cache->lock);
        slab_obj_destroy(cache, obj, slab);
        spinlock_unlock(&cache->lock);
    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;

    spinlock_lock(&slab_cache_lock);
    printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
                       "slab_magazine",
                       sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
                       "slab_cache",
                       sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                                          sizeof(slab_t),
                                          0, NULL, NULL,
                                          SLAB_CACHE_SLINSIDE);

    /* Initialize structures for malloc */
    for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                                             size, 0,
                                             NULL, NULL, 0);
    }
}
|
/**************************************/
/* kalloc/kfree functions */
|
void * kalloc(unsigned int size, int flags)
{
    int idx;

    ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}
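
/*
 * Index example (a sketch assuming SLAB_MIN_MALLOC_W == 3, i.e. the
 * smallest cache is malloc-8): a request for 100 bytes gives
 * fnzb(100-1) == 6, hence idx = 6 - 3 + 1 == 4, which selects the
 * 128-byte cache - the smallest power of two that fits the request.
 */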
|
void kfree(void *obj)
{
    slab_t *slab = obj2slab(obj);

    _slab_free(slab->cache, obj, slab);
}