Rev 768 | Rev 769 | ||
---|---|---|---|
1 | /* |
1 | /* |
2 | * Copyright (C) 2006 Ondrej Palkovsky |
2 | * Copyright (C) 2006 Ondrej Palkovsky |
3 | * All rights reserved. |
3 | * All rights reserved. |
4 | * |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
7 | * are met: |
8 | * |
8 | * |
9 | * - Redistributions of source code must retain the above copyright |
9 | * - Redistributions of source code must retain the above copyright |
10 | * notice, this list of conditions and the following disclaimer. |
10 | * notice, this list of conditions and the following disclaimer. |
11 | * - Redistributions in binary form must reproduce the above copyright |
11 | * - Redistributions in binary form must reproduce the above copyright |
12 | * notice, this list of conditions and the following disclaimer in the |
12 | * notice, this list of conditions and the following disclaimer in the |
13 | * documentation and/or other materials provided with the distribution. |
13 | * documentation and/or other materials provided with the distribution. |
14 | * - The name of the author may not be used to endorse or promote products |
14 | * - The name of the author may not be used to endorse or promote products |
15 | * derived from this software without specific prior written permission. |
15 | * derived from this software without specific prior written permission. |
16 | * |
16 | * |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
20 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
21 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
22 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
23 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
24 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
25 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
26 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | */ |
27 | */ |
28 | 28 | ||
- | 29 | /* |
- | 30 | * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator |
- | 31 | * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/ |
- | 32 | * |
- | 33 | * with the following exceptions: |
- | 34 | * - empty SLABs are deallocated immediately |
- | 35 | * (in Linux they are kept in a linked list, in Solaris ???) |
- | 36 | * - empty magazines are deallocated when not needed |
- | 37 | * (in Solaris they are held in a linked list in the slab cache) |
- | 38 | * |
- | 39 | * The following features are not currently supported but would be easy to add: |
- | 40 | * - cache coloring |
- | 41 | * - dynamic magazine growing (different magazine sizes are already |
- | 42 | * supported, but we would need to adjust the allocation strategy) |
- | 43 | * |
- | 44 | * The SLAB allocator supports per-CPU caches ('magazines') to facilitate |
- | 45 | * good SMP scaling. |
- | 46 | * |
- | 47 | * When a new object is allocated, the CPU-bound magazine is checked first. |
- | 48 | * If the object is not found there, it is allocated from a CPU-shared |
- | 49 | * slab - if a partially full one is found, it is used, otherwise a new |
- | 50 | * one is allocated. |
- | 51 | * |
- | 52 | * When an object is deallocated, it is put into the CPU-bound magazine. |
- | 53 | * If there is no such magazine, a new one is allocated (if that fails, |
- | 54 | * the object is deallocated directly into the slab). If the magazine is |
- | 55 | * full, it is put onto the CPU-shared list of magazines and a new one |
- | 56 | * is allocated. |
- | 57 | * |
- | 58 | * The CPU-bound magazine is actually a pair of magazines, to avoid |
- | 59 | * thrashing when somebody allocates/deallocates one item at the magazine |
- | 60 | * size boundary. LIFO order is enforced, which should avoid fragmentation |
- | 61 | * as much as possible. |
- | 62 | * |
- | 63 | * Every cache contains a list of full slabs and a list of partially full |
- | 64 | * slabs. Empty slabs are freed immediately (thrashing is avoided because |
- | 65 | * of the magazines). |
- | 66 | * |
- | 67 | * The slab information structure is kept inside the data area, if possible. |
- | 68 | * A cache can be marked so that it does not use magazines. This is used |
- | 69 | * only for slab-related caches, to avoid deadlocks and infinite recursion |
- | 70 | * (the SLAB allocator uses itself for allocating all of its control |
- | 71 | * structures). |
- | 72 | * |
- | 73 | * The SLAB allocator allocates a lot of space and does not free it. When |
- | 74 | * the frame allocator fails to allocate a frame, it calls slab_reclaim(). |
- | 75 | * The light reclaim is tried first, then the brutal reclaim. The light |
- | 76 | * reclaim releases slabs from the CPU-shared magazine list until at least |
- | 77 | * one slab is deallocated in each cache (this algorithm should probably |
- | 78 | * change). The brutal reclaim removes all cached objects, even from the |
- | 79 | * CPU-bound magazines. |
- | 80 | */ |
|
- | 81 | ||
- | 82 | ||
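For orientation, here is a minimal usage sketch of the public API the comment above describes. The calls slab_cache_create(), slab_alloc() and slab_free() are the ones defined later in this file; the thread_t type, the cache name and the wrapper functions are hypothetical examples, not part of this revision.

```c
#include <mm/slab.h>	/* slab_cache_t and the allocator API */

/* Hypothetical client: a cache of fixed-size thread descriptors. */
typedef struct {
	int tid;
	void *kstack;
} thread_t;

static slab_cache_t *thread_cache;

void thread_subsystem_init(void)
{
	/* No constructor/destructor, default alignment, magazines enabled. */
	thread_cache = slab_cache_create("thread_t", sizeof(thread_t), 0,
					 NULL, NULL, 0);
}

thread_t *thread_allocate(void)
{
	/* With no flags, slab_alloc() always returns memory (see below). */
	return slab_alloc(thread_cache, 0);
}

void thread_release(thread_t *t)
{
	slab_free(thread_cache, t);
}
```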
29 | #include <synch/spinlock.h> |
83 | #include <synch/spinlock.h> |
30 | #include <mm/slab.h> |
84 | #include <mm/slab.h> |
31 | #include <list.h> |
85 | #include <list.h> |
32 | #include <memstr.h> |
86 | #include <memstr.h> |
33 | #include <align.h> |
87 | #include <align.h> |
34 | #include <mm/heap.h> |
88 | #include <mm/heap.h> |
35 | #include <mm/frame.h> |
89 | #include <mm/frame.h> |
36 | #include <config.h> |
90 | #include <config.h> |
37 | #include <print.h> |
91 | #include <print.h> |
38 | #include <arch.h> |
92 | #include <arch.h> |
39 | #include <panic.h> |
93 | #include <panic.h> |
40 | #include <debug.h> |
94 | #include <debug.h> |
41 | 95 | ||
42 | SPINLOCK_INITIALIZE(slab_cache_lock); |
96 | SPINLOCK_INITIALIZE(slab_cache_lock); |
43 | LIST_INITIALIZE(slab_cache_list); |
97 | static LIST_INITIALIZE(slab_cache_list); |
44 | - | ||
45 | slab_cache_t mag_cache; |
- | |
46 | 98 | ||
- | 99 | /** Magazine cache */ |
- | 100 | static slab_cache_t mag_cache; |
- | 101 | /** Cache for cache descriptors */ |
- | 102 | static slab_cache_t slab_cache_cache; |
- | 103 | ||
- | 104 | /** Cache for external slab descriptors |
- | 105 | * This time we want a per-CPU cache, so do not make it static |
- | 106 | * - using SLAB for internal SLAB structures will not deadlock, |
- | 107 | * as all slab structures are 'small' - the control structures of |
- | 108 | * their caches do not require further allocation |
- | 109 | */ |
- | 110 | static slab_cache_t *slab_extern_cache; |
47 | 111 | ||
- | 112 | /** Slab descriptor */ |
48 | typedef struct { |
113 | typedef struct { |
49 | slab_cache_t *cache; /**< Pointer to parent cache */ |
114 | slab_cache_t *cache; /**< Pointer to parent cache */ |
50 | link_t link; /* List of full/partial slabs */ |
115 | link_t link; /* List of full/partial slabs */ |
51 | void *start; /**< Start address of first available item */ |
116 | void *start; /**< Start address of first available item */ |
52 | count_t available; /**< Count of available items in this slab */ |
117 | count_t available; /**< Count of available items in this slab */ |
53 | index_t nextavail; /**< The index of next available item */ |
118 | index_t nextavail; /**< The index of next available item */ |
54 | } slab_t; |
119 | } slab_t; |
55 | 120 | ||
56 | /**************************************/ |
121 | /**************************************/ |
57 | /* SLAB allocation functions */ |
122 | /* SLAB allocation functions */ |
58 | 123 | ||
59 | /** |
124 | /** |
60 | * Allocate frames for slab space and initialize |
125 | * Allocate frames for slab space and initialize |
61 | * |
126 | * |
62 | * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!! |
- | |
63 | */ |
127 | */ |
64 | static slab_t * slab_space_alloc(slab_cache_t *cache, int flags) |
128 | static slab_t * slab_space_alloc(slab_cache_t *cache, int flags) |
65 | { |
129 | { |
66 | void *data; |
130 | void *data; |
67 | slab_t *slab; |
131 | slab_t *slab; |
68 | size_t fsize; |
132 | size_t fsize; |
69 | int i; |
133 | int i; |
70 | zone_t *zone = NULL; |
134 | zone_t *zone = NULL; |
71 | int status; |
135 | int status; |
72 | frame_t *frame; |
136 | frame_t *frame; |
73 | 137 | ||
74 | data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone); |
138 | data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone); |
75 | if (status != FRAME_OK) { |
139 | if (status != FRAME_OK) { |
76 | return NULL; |
140 | return NULL; |
77 | } |
141 | } |
78 | if (! (cache->flags & SLAB_CACHE_SLINSIDE)) { |
142 | if (! (cache->flags & SLAB_CACHE_SLINSIDE)) { |
79 | slab = malloc(sizeof(*slab)); // , flags); |
143 | slab = slab_alloc(slab_extern_cache, flags); |
80 | if (!slab) { |
144 | if (!slab) { |
81 | frame_free((__address)data); |
145 | frame_free((__address)data); |
82 | return NULL; |
146 | return NULL; |
83 | } |
147 | } |
84 | } else { |
148 | } else { |
85 | fsize = (PAGE_SIZE << cache->order); |
149 | fsize = (PAGE_SIZE << cache->order); |
86 | slab = data + fsize - sizeof(*slab); |
150 | slab = data + fsize - sizeof(*slab); |
87 | } |
151 | } |
88 | 152 | ||
89 | /* Fill in slab structures */ |
153 | /* Fill in slab structures */ |
90 | /* TODO: some better way of accessing the frame */ |
154 | /* TODO: some better way of accessing the frame */ |
91 | for (i=0; i < (1 << cache->order); i++) { |
155 | for (i=0; i < (1 << cache->order); i++) { |
92 | frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE))); |
156 | frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE))); |
93 | frame->parent = slab; |
157 | frame->parent = slab; |
94 | } |
158 | } |
95 | 159 | ||
96 | slab->start = data; |
160 | slab->start = data; |
97 | slab->available = cache->objects; |
161 | slab->available = cache->objects; |
98 | slab->nextavail = 0; |
162 | slab->nextavail = 0; |
99 | slab->cache = cache; |
163 | slab->cache = cache; |
100 | 164 | ||
101 | for (i=0; i<cache->objects;i++) |
165 | for (i=0; i<cache->objects;i++) |
102 | *((int *) (slab->start + i*cache->size)) = i+1; |
166 | *((int *) (slab->start + i*cache->size)) = i+1; |
103 | 167 | ||
104 | atomic_inc(&cache->allocated_slabs); |
168 | atomic_inc(&cache->allocated_slabs); |
105 | return slab; |
169 | return slab; |
106 | } |
170 | } |
107 | 171 | ||
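The final loop of slab_space_alloc() threads an in-band freelist through the objects: the first int of every free object holds the index of the next free one. A sketch of the initial layout, for a hypothetical slab with cache->objects == 4:

```c
/*
 *   object index:          0   1   2   3
 *   first int of object:   1   2   3   4
 *
 * slab->nextavail == 0, so slab_obj_create() hands out object 0 first
 * and reads the next free index out of the object itself.  The final
 * value (4 == cache->objects) is never followed, because by then
 * slab->available has reached zero and the slab sits on full_slabs.
 */
```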
108 | /** |
172 | /** |
109 | * Deallocate space associated with SLAB |
173 | * Deallocate space associated with SLAB |
110 | * |
174 | * |
111 | * @return number of freed frames |
175 | * @return number of freed frames |
112 | */ |
176 | */ |
113 | static count_t slab_space_free(slab_cache_t *cache, slab_t *slab) |
177 | static count_t slab_space_free(slab_cache_t *cache, slab_t *slab) |
114 | { |
178 | { |
115 | frame_free((__address)slab->start); |
179 | frame_free((__address)slab->start); |
116 | if (! (cache->flags & SLAB_CACHE_SLINSIDE)) |
180 | if (! (cache->flags & SLAB_CACHE_SLINSIDE)) |
117 | free(slab); |
181 | slab_free(slab_extern_cache, slab); |
118 | 182 | ||
119 | atomic_dec(&cache->allocated_slabs); |
183 | atomic_dec(&cache->allocated_slabs); |
120 | 184 | ||
121 | return 1 << cache->order; |
185 | return 1 << cache->order; |
122 | } |
186 | } |
123 | 187 | ||
124 | /** Map object to slab structure */ |
188 | /** Map object to slab structure */ |
125 | static slab_t * obj2slab(void *obj) |
189 | static slab_t * obj2slab(void *obj) |
126 | { |
190 | { |
127 | frame_t *frame; |
191 | frame_t *frame; |
128 | 192 | ||
129 | frame = frame_addr2frame((__address)obj); |
193 | frame = frame_addr2frame((__address)obj); |
130 | return (slab_t *)frame->parent; |
194 | return (slab_t *)frame->parent; |
131 | } |
195 | } |
132 | 196 | ||
133 | /**************************************/ |
197 | /**************************************/ |
134 | /* SLAB functions */ |
198 | /* SLAB functions */ |
135 | 199 | ||
136 | 200 | ||
137 | /** |
201 | /** |
138 | * Return object to slab and call a destructor |
202 | * Return object to slab and call a destructor |
139 | * |
203 | * |
140 | * Assume the cache->lock is held; |
204 | * Assume the cache->lock is held; |
141 | * |
205 | * |
142 | * @param slab The slab of the object, if the caller knows it directly; otherwise NULL |
206 | * @param slab The slab of the object, if the caller knows it directly; otherwise NULL |
143 | * |
207 | * |
144 | * @return Number of freed pages |
208 | * @return Number of freed pages |
145 | */ |
209 | */ |
146 | static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, |
210 | static count_t slab_obj_destroy(slab_cache_t *cache, void *obj, |
147 | slab_t *slab) |
211 | slab_t *slab) |
148 | { |
212 | { |
149 | count_t frames = 0; |
213 | count_t frames = 0; |
150 | 214 | ||
151 | if (!slab) |
215 | if (!slab) |
152 | slab = obj2slab(obj); |
216 | slab = obj2slab(obj); |
153 | 217 | ||
154 | ASSERT(slab->cache == cache); |
218 | ASSERT(slab->cache == cache); |
155 | 219 | ||
156 | *((int *)obj) = slab->nextavail; |
220 | *((int *)obj) = slab->nextavail; |
157 | slab->nextavail = (obj - slab->start)/cache->size; |
221 | slab->nextavail = (obj - slab->start)/cache->size; |
158 | slab->available++; |
222 | slab->available++; |
159 | 223 | ||
160 | /* Move it to correct list */ |
224 | /* Move it to correct list */ |
161 | if (slab->available == 1) { |
225 | if (slab->available == 1) { |
162 | /* It was in full, move to partial */ |
226 | /* It was in full, move to partial */ |
163 | list_remove(&slab->link); |
227 | list_remove(&slab->link); |
164 | list_prepend(&slab->link, &cache->partial_slabs); |
228 | list_prepend(&slab->link, &cache->partial_slabs); |
165 | } |
229 | } |
166 | if (slab->available == cache->objects) { |
230 | if (slab->available == cache->objects) { |
167 | /* Free associated memory */ |
231 | /* Free associated memory */ |
168 | list_remove(&slab->link); |
232 | list_remove(&slab->link); |
169 | /* Avoid deadlock */ |
233 | /* Avoid deadlock */ |
170 | spinlock_unlock(&cache->lock); |
234 | spinlock_unlock(&cache->lock); |
171 | frames = slab_space_free(cache, slab); |
235 | frames = slab_space_free(cache, slab); |
172 | spinlock_lock(&cache->lock); |
236 | spinlock_lock(&cache->lock); |
173 | } |
237 | } |
174 | 238 | ||
175 | return frames; |
239 | return frames; |
176 | } |
240 | } |
177 | 241 | ||
178 | /** |
242 | /** |
179 | * Take a new object from a slab, or create a new slab if needed |
243 | * Take a new object from a slab, or create a new slab if needed |
180 | * |
244 | * |
181 | * Assume cache->lock is held. |
245 | * Assume cache->lock is held. |
182 | * |
246 | * |
183 | * @return Object address or null |
247 | * @return Object address or null |
184 | */ |
248 | */ |
185 | static void * slab_obj_create(slab_cache_t *cache, int flags) |
249 | static void * slab_obj_create(slab_cache_t *cache, int flags) |
186 | { |
250 | { |
187 | slab_t *slab; |
251 | slab_t *slab; |
188 | void *obj; |
252 | void *obj; |
189 | 253 | ||
190 | if (list_empty(&cache->partial_slabs)) { |
254 | if (list_empty(&cache->partial_slabs)) { |
191 | /* Allow recursion and reclaiming |
255 | /* Allow recursion and reclaiming |
192 | * - this should work, as the SLAB control structures |
256 | * - this should work, as the SLAB control structures |
193 | * are small and do not need to allocate with anything |
257 | * are small and do not need to allocate with anything |
194 | * other than frame_alloc when they are allocating, |
258 | * other than frame_alloc when they are allocating, |
195 | * which is why recursion is at most one level deep |
259 | * which is why recursion is at most one level deep |
196 | */ |
260 | */ |
197 | spinlock_unlock(&cache->lock); |
261 | spinlock_unlock(&cache->lock); |
198 | slab = slab_space_alloc(cache, flags); |
262 | slab = slab_space_alloc(cache, flags); |
199 | spinlock_lock(&cache->lock); |
263 | spinlock_lock(&cache->lock); |
200 | if (!slab) { |
264 | if (!slab) { |
201 | return NULL; |
265 | return NULL; |
202 | } |
266 | } |
203 | } else { |
267 | } else { |
204 | slab = list_get_instance(cache->partial_slabs.next, |
268 | slab = list_get_instance(cache->partial_slabs.next, |
205 | slab_t, |
269 | slab_t, |
206 | link); |
270 | link); |
207 | list_remove(&slab->link); |
271 | list_remove(&slab->link); |
208 | } |
272 | } |
209 | obj = slab->start + slab->nextavail * cache->size; |
273 | obj = slab->start + slab->nextavail * cache->size; |
210 | slab->nextavail = *((int *)obj); |
274 | slab->nextavail = *((int *)obj); |
211 | slab->available--; |
275 | slab->available--; |
212 | if (! slab->available) |
276 | if (! slab->available) |
213 | list_prepend(&slab->link, &cache->full_slabs); |
277 | list_prepend(&slab->link, &cache->full_slabs); |
214 | else |
278 | else |
215 | list_prepend(&slab->link, &cache->partial_slabs); |
279 | list_prepend(&slab->link, &cache->partial_slabs); |
216 | return obj; |
280 | return obj; |
217 | } |
281 | } |
218 | 282 | ||
219 | /**************************************/ |
283 | /**************************************/ |
220 | /* CPU-Cache slab functions */ |
284 | /* CPU-Cache slab functions */ |
221 | 285 | ||
222 | /** |
286 | /** |
223 | * Free all objects in the magazine and free the memory associated with it |
287 | * Free all objects in the magazine and free the memory associated with it |
224 | * |
288 | * |
225 | * Assume mag_cache[cpu].lock is locked |
289 | * Assume mag_cache[cpu].lock is locked |
226 | * |
290 | * |
227 | * @return Number of freed pages |
291 | * @return Number of freed pages |
228 | */ |
292 | */ |
229 | static count_t magazine_destroy(slab_cache_t *cache, |
293 | static count_t magazine_destroy(slab_cache_t *cache, |
230 | slab_magazine_t *mag) |
294 | slab_magazine_t *mag) |
231 | { |
295 | { |
232 | int i; |
296 | int i; |
233 | count_t frames = 0; |
297 | count_t frames = 0; |
234 | 298 | ||
235 | for (i=0;i < mag->busy; i++) { |
299 | for (i=0;i < mag->busy; i++) { |
236 | frames += slab_obj_destroy(cache, mag->objs[i], NULL); |
300 | frames += slab_obj_destroy(cache, mag->objs[i], NULL); |
237 | atomic_dec(&cache->cached_objs); |
301 | atomic_dec(&cache->cached_objs); |
238 | } |
302 | } |
239 | 303 | ||
240 | slab_free(&mag_cache, mag); |
304 | slab_free(&mag_cache, mag); |
241 | 305 | ||
242 | return frames; |
306 | return frames; |
243 | } |
307 | } |
244 | 308 | ||
245 | /** |
309 | /** |
- | 310 | * Find a full magazine, set it as current and return it |
- | 311 | * |
- | 312 | * Assume the cpu_magazine lock is held |
- | 313 | */ |
- | 314 | static slab_magazine_t * get_full_current_mag(slab_cache_t *cache) |
- | 315 | { |
- | 316 | slab_magazine_t *cmag, *lastmag, *newmag; |
- | 317 | ||
- | 318 | cmag = cache->mag_cache[CPU->id].current; |
- | 319 | lastmag = cache->mag_cache[CPU->id].last; |
- | 320 | if (cmag) { /* First try local CPU magazines */ |
- | 321 | if (cmag->busy) |
- | 322 | return cmag; |
- | 323 | ||
- | 324 | if (lastmag && lastmag->busy) { |
- | 325 | cache->mag_cache[CPU->id].current = lastmag; |
- | 326 | cache->mag_cache[CPU->id].last = cmag; |
- | 327 | return lastmag; |
- | 328 | } |
- | 329 | } |
- | 330 | /* Local magazines are empty, import one from the magazine list */ |
- | 331 | spinlock_lock(&cache->lock); |
- | 332 | if (list_empty(&cache->magazines)) { |
- | 333 | spinlock_unlock(&cache->lock); |
- | 334 | return NULL; |
- | 335 | } |
- | 336 | newmag = list_get_instance(cache->magazines.next, |
- | 337 | slab_magazine_t, |
- | 338 | link); |
- | 339 | list_remove(&newmag->link); |
- | 340 | spinlock_unlock(&cache->lock); |
- | 341 | ||
- | 342 | if (lastmag) |
- | 343 | slab_free(&mag_cache, lastmag); |
- | 344 | cache->mag_cache[CPU->id].last = cmag; |
- | 345 | cache->mag_cache[CPU->id].current = newmag; |
- | 346 | return newmag; |
- | 347 | } |
- | 348 | ||
- | 349 | /** |
246 | * Try to find object in CPU-cache magazines |
350 | * Try to find object in CPU-cache magazines |
247 | * |
351 | * |
248 | * @return Pointer to object or NULL if not available |
352 | * @return Pointer to object or NULL if not available |
249 | */ |
353 | */ |
250 | static void * magazine_obj_get(slab_cache_t *cache) |
354 | static void * magazine_obj_get(slab_cache_t *cache) |
251 | { |
355 | { |
252 | slab_magazine_t *mag; |
356 | slab_magazine_t *mag; |
253 | void *obj; |
357 | void *obj; |
254 | 358 | ||
255 | spinlock_lock(&cache->mag_cache[CPU->id].lock); |
359 | spinlock_lock(&cache->mag_cache[CPU->id].lock); |
256 | 360 | ||
257 | mag = cache->mag_cache[CPU->id].current; |
361 | mag = get_full_current_mag(cache); |
258 | if (!mag) |
362 | if (!mag) { |
259 | goto out; |
- | |
260 | - | ||
261 | if (!mag->busy) { |
- | |
262 | /* If current is empty && last exists && not empty, exchange */ |
- | |
263 | if (cache->mag_cache[CPU->id].last \ |
363 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
264 | && cache->mag_cache[CPU->id].last->busy) { |
- | |
265 | cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last; |
- | |
266 | cache->mag_cache[CPU->id].last = mag; |
- | |
267 | mag = cache->mag_cache[CPU->id].current; |
- | |
268 | goto gotit; |
- | |
269 | } |
- | |
270 | /* If still not busy, exchange current with some from |
- | |
271 | * other full magazines */ |
- | |
272 | spinlock_lock(&cache->lock); |
- | |
273 | if (list_empty(&cache->magazines)) { |
- | |
274 | spinlock_unlock(&cache->lock); |
- | |
275 | goto out; |
364 | return NULL; |
276 | } |
- | |
277 | /* Free current magazine and take one from list */ |
- | |
278 | slab_free(&mag_cache, mag); |
- | |
279 | - | ||
280 | mag = list_get_instance(cache->magazines.next, |
- | |
281 | slab_magazine_t, |
- | |
282 | link); |
- | |
283 | list_remove(&mag->link); |
- | |
284 | - | ||
285 | spinlock_unlock(&cache->lock); |
- | |
286 | } |
365 | } |
287 | gotit: |
- | |
288 | obj = mag->objs[--mag->busy]; |
366 | obj = mag->objs[--mag->busy]; |
289 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
367 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
290 | atomic_dec(&cache->cached_objs); |
368 | atomic_dec(&cache->cached_objs); |
291 | 369 | ||
292 | return obj; |
370 | return obj; |
293 | out: |
- | |
294 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
- | |
295 | return NULL; |
- | |
296 | } |
371 | } |
297 | 372 | ||
298 | /** |
373 | /** |
299 | * Ensure that the current magazine is empty; return a pointer to it, or NULL if |
374 | * Ensure that the current magazine is empty; return a pointer to it, or NULL if |
300 | * no empty magazine available and cannot be allocated |
375 | * no empty magazine is available and cannot be allocated |
301 | * |
376 | * |
302 | * We have two magazines bound to each processor. |
377 | * We have two magazines bound to each processor. |
303 | * First try the current one. |
378 | * First try the current one. |
304 | * If it is full, try the last one. |
379 | * If it is full, try the last one. |
305 | * If that is full too, put it onto the magazines list, |
380 | * If that is full too, put it onto the magazines list, |
306 | * allocate a new one and exchange last & current. |
381 | * allocate a new one and exchange last & current. |
307 | * |
382 | * |
308 | */ |
383 | */ |
309 | static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache) |
384 | static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache) |
310 | { |
385 | { |
311 | slab_magazine_t *cmag,*lastmag,*newmag; |
386 | slab_magazine_t *cmag,*lastmag,*newmag; |
312 | 387 | ||
313 | cmag = cache->mag_cache[CPU->id].current; |
388 | cmag = cache->mag_cache[CPU->id].current; |
314 | lastmag = cache->mag_cache[CPU->id].last; |
389 | lastmag = cache->mag_cache[CPU->id].last; |
315 | 390 | ||
316 | if (cmag) { |
391 | if (cmag) { |
317 | if (cmag->busy < cmag->size) |
392 | if (cmag->busy < cmag->size) |
318 | return cmag; |
393 | return cmag; |
319 | if (lastmag && lastmag->busy < lastmag->size) { |
394 | if (lastmag && lastmag->busy < lastmag->size) { |
320 | cache->mag_cache[CPU->id].last = cmag; |
395 | cache->mag_cache[CPU->id].last = cmag; |
321 | cache->mag_cache[CPU->id].current = lastmag; |
396 | cache->mag_cache[CPU->id].current = lastmag; |
322 | return lastmag; |
397 | return lastmag; |
323 | } |
398 | } |
324 | } |
399 | } |
325 | /* current | last are full | nonexistent, allocate new */ |
400 | /* current | last are full | nonexistent, allocate new */ |
326 | /* We do not want to sleep just because of caching */ |
401 | /* We do not want to sleep just because of caching */ |
327 | /* Especially we do not want reclaiming to start, as |
402 | /* Especially we do not want reclaiming to start, as |
328 | * this would deadlock */ |
403 | * this would deadlock */ |
329 | newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM); |
404 | newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM); |
330 | if (!newmag) |
405 | if (!newmag) |
331 | return NULL; |
406 | return NULL; |
332 | newmag->size = SLAB_MAG_SIZE; |
407 | newmag->size = SLAB_MAG_SIZE; |
333 | newmag->busy = 0; |
408 | newmag->busy = 0; |
334 | 409 | ||
335 | /* Flush last to magazine list */ |
410 | /* Flush last to magazine list */ |
336 | if (lastmag) |
411 | if (lastmag) |
337 | list_prepend(&lastmag->link, &cache->magazines); |
412 | list_prepend(&lastmag->link, &cache->magazines); |
338 | /* Move current as last, save new as current */ |
413 | /* Move current as last, save new as current */ |
339 | cache->mag_cache[CPU->id].last = cmag; |
414 | cache->mag_cache[CPU->id].last = cmag; |
340 | cache->mag_cache[CPU->id].current = newmag; |
415 | cache->mag_cache[CPU->id].current = newmag; |
341 | 416 | ||
342 | return newmag; |
417 | return newmag; |
343 | } |
418 | } |
344 | 419 | ||
345 | /** |
420 | /** |
346 | * Put object into CPU-cache magazine |
421 | * Put object into CPU-cache magazine |
347 | * |
422 | * |
348 | * @return 0 - success, -1 - could not get memory |
423 | * @return 0 - success, -1 - could not get memory |
349 | */ |
424 | */ |
350 | static int magazine_obj_put(slab_cache_t *cache, void *obj) |
425 | static int magazine_obj_put(slab_cache_t *cache, void *obj) |
351 | { |
426 | { |
352 | slab_magazine_t *mag; |
427 | slab_magazine_t *mag; |
353 | 428 | ||
354 | spinlock_lock(&cache->mag_cache[CPU->id].lock); |
429 | spinlock_lock(&cache->mag_cache[CPU->id].lock); |
355 | 430 | ||
356 | mag = make_empty_current_mag(cache); |
431 | mag = make_empty_current_mag(cache); |
357 | if (!mag) |
432 | if (!mag) { |
- | 433 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
358 | goto errout; |
434 | return -1; |
- | 435 | } |
359 | 436 | ||
360 | mag->objs[mag->busy++] = obj; |
437 | mag->objs[mag->busy++] = obj; |
361 | 438 | ||
362 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
439 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
363 | atomic_inc(&cache->cached_objs); |
440 | atomic_inc(&cache->cached_objs); |
364 | return 0; |
441 | return 0; |
365 | errout: |
- | |
366 | spinlock_unlock(&cache->mag_cache[CPU->id].lock); |
- | |
367 | return -1; |
- | |
368 | } |
442 | } |
369 | 443 | ||
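The current/last pair mentioned in the header comment is what keeps magazine_obj_get()/magazine_obj_put() cheap at the magazine-size boundary. A sketch of the degenerate case a single magazine would suffer from (the magazine size 8 is purely illustrative):

```c
/*
 * Single magazine, busy == 8 (full):
 *   slab_free()  -> magazine full: flush a whole magazine of objects
 *   slab_alloc() -> magazine empty: fetch or refill one
 *   ...and so on for every free/alloc pair at the boundary.
 *
 * With the current/last pair, the free lands in whichever of the two
 * magazines has room and the next alloc is served from it again, so
 * the slab layer and cache->lock are not touched at all.
 */
```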
370 | 444 | ||
371 | /**************************************/ |
445 | /**************************************/ |
372 | /* SLAB CACHE functions */ |
446 | /* SLAB CACHE functions */ |
373 | 447 | ||
374 | /** Return the number of objects that fit into a slab of the given cache */ |
448 | /** Return the number of objects that fit into a slab of the given cache */ |
375 | static int comp_objects(slab_cache_t *cache) |
449 | static int comp_objects(slab_cache_t *cache) |
376 | { |
450 | { |
377 | if (cache->flags & SLAB_CACHE_SLINSIDE) |
451 | if (cache->flags & SLAB_CACHE_SLINSIDE) |
378 | return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size; |
452 | return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size; |
379 | else |
453 | else |
380 | return (PAGE_SIZE << cache->order) / cache->size; |
454 | return (PAGE_SIZE << cache->order) / cache->size; |
381 | } |
455 | } |
382 | 456 | ||
383 | /** Return wasted space in slab */ |
457 | /** Return wasted space in slab */ |
384 | static int badness(slab_cache_t *cache) |
458 | static int badness(slab_cache_t *cache) |
385 | { |
459 | { |
386 | int objects; |
460 | int objects; |
387 | int ssize; |
461 | int ssize; |
388 | 462 | ||
389 | objects = comp_objects(cache); |
463 | objects = comp_objects(cache); |
390 | ssize = PAGE_SIZE << cache->order; |
464 | ssize = PAGE_SIZE << cache->order; |
391 | if (cache->flags & SLAB_CACHE_SLINSIDE) |
465 | if (cache->flags & SLAB_CACHE_SLINSIDE) |
392 | ssize -= sizeof(slab_t); |
466 | ssize -= sizeof(slab_t); |
393 | return ssize - objects*cache->size; |
467 | return ssize - objects*cache->size; |
394 | } |
468 | } |
395 | 469 | ||
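A worked example of the two helpers above, assuming PAGE_SIZE == 4096 and, purely for illustration, sizeof(slab_t) == 32 (the real value is architecture-dependent):

```c
/*
 * cache->size == 512, cache->order == 0, SLAB_CACHE_SLINSIDE set:
 *   comp_objects(): (4096 - 32) / 512      == 7 objects per slab
 *   badness():      (4096 - 32) - 7 * 512  == 480 wasted bytes
 *
 * _slab_cache_create() below keeps incrementing cache->order until
 * badness() no longer exceeds SLAB_MAX_BADNESS(cache).
 */
```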
396 | /** Initialize allocated memory as a slab cache */ |
470 | /** Initialize allocated memory as a slab cache */ |
397 | static void |
471 | static void |
398 | _slab_cache_create(slab_cache_t *cache, |
472 | _slab_cache_create(slab_cache_t *cache, |
399 | char *name, |
473 | char *name, |
400 | size_t size, |
474 | size_t size, |
401 | size_t align, |
475 | size_t align, |
402 | int (*constructor)(void *obj, int kmflag), |
476 | int (*constructor)(void *obj, int kmflag), |
403 | void (*destructor)(void *obj), |
477 | void (*destructor)(void *obj), |
404 | int flags) |
478 | int flags) |
405 | { |
479 | { |
406 | int i; |
480 | int i; |
407 | 481 | ||
408 | memsetb((__address)cache, sizeof(*cache), 0); |
482 | memsetb((__address)cache, sizeof(*cache), 0); |
409 | cache->name = name; |
483 | cache->name = name; |
410 | 484 | ||
411 | if (align < sizeof(__native)) |
485 | if (align < sizeof(__native)) |
412 | align = sizeof(__native); |
486 | align = sizeof(__native); |
413 | size = ALIGN_UP(size, align); |
487 | size = ALIGN_UP(size, align); |
414 | 488 | ||
415 | cache->size = size; |
489 | cache->size = size; |
416 | 490 | ||
417 | cache->constructor = constructor; |
491 | cache->constructor = constructor; |
418 | cache->destructor = destructor; |
492 | cache->destructor = destructor; |
419 | cache->flags = flags; |
493 | cache->flags = flags; |
420 | 494 | ||
421 | list_initialize(&cache->full_slabs); |
495 | list_initialize(&cache->full_slabs); |
422 | list_initialize(&cache->partial_slabs); |
496 | list_initialize(&cache->partial_slabs); |
423 | list_initialize(&cache->magazines); |
497 | list_initialize(&cache->magazines); |
424 | spinlock_initialize(&cache->lock, "cachelock"); |
498 | spinlock_initialize(&cache->lock, "cachelock"); |
425 | if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) { |
499 | if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) { |
426 | for (i=0; i< config.cpu_count; i++) |
500 | for (i=0; i< config.cpu_count; i++) |
427 | spinlock_initialize(&cache->mag_cache[i].lock, |
501 | spinlock_initialize(&cache->mag_cache[i].lock, |
428 | "cpucachelock"); |
502 | "cpucachelock"); |
429 | } |
503 | } |
430 | 504 | ||
431 | /* Compute slab sizes, object counts in slabs etc. */ |
505 | /* Compute slab sizes, object counts in slabs etc. */ |
432 | if (cache->size < SLAB_INSIDE_SIZE) |
506 | if (cache->size < SLAB_INSIDE_SIZE) |
433 | cache->flags |= SLAB_CACHE_SLINSIDE; |
507 | cache->flags |= SLAB_CACHE_SLINSIDE; |
434 | 508 | ||
435 | /* Minimum slab order */ |
509 | /* Minimum slab order */ |
436 | cache->order = (cache->size-1) >> PAGE_WIDTH; |
510 | cache->order = (cache->size-1) >> PAGE_WIDTH; |
437 | 511 | ||
438 | while (badness(cache) > SLAB_MAX_BADNESS(cache)) { |
512 | while (badness(cache) > SLAB_MAX_BADNESS(cache)) { |
439 | cache->order += 1; |
513 | cache->order += 1; |
440 | } |
514 | } |
441 | cache->objects = comp_objects(cache); |
515 | cache->objects = comp_objects(cache); |
442 | /* If info fits in, put it inside */ |
516 | /* If info fits in, put it inside */ |
443 | if (badness(cache) > sizeof(slab_t)) |
517 | if (badness(cache) > sizeof(slab_t)) |
444 | cache->flags |= SLAB_CACHE_SLINSIDE; |
518 | cache->flags |= SLAB_CACHE_SLINSIDE; |
445 | 519 | ||
446 | spinlock_lock(&slab_cache_lock); |
520 | spinlock_lock(&slab_cache_lock); |
447 | 521 | ||
448 | list_append(&cache->link, &slab_cache_list); |
522 | list_append(&cache->link, &slab_cache_list); |
449 | 523 | ||
450 | spinlock_unlock(&slab_cache_lock); |
524 | spinlock_unlock(&slab_cache_lock); |
451 | } |
525 | } |
452 | 526 | ||
453 | /** Create slab cache */ |
527 | /** Create slab cache */ |
454 | slab_cache_t * slab_cache_create(char *name, |
528 | slab_cache_t * slab_cache_create(char *name, |
455 | size_t size, |
529 | size_t size, |
456 | size_t align, |
530 | size_t align, |
457 | int (*constructor)(void *obj, int kmflag), |
531 | int (*constructor)(void *obj, int kmflag), |
458 | void (*destructor)(void *obj), |
532 | void (*destructor)(void *obj), |
459 | int flags) |
533 | int flags) |
460 | { |
534 | { |
461 | slab_cache_t *cache; |
535 | slab_cache_t *cache; |
462 | 536 | ||
463 | cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0])); |
537 | cache = slab_alloc(&slab_cache_cache, 0); |
464 | _slab_cache_create(cache, name, size, align, constructor, destructor, |
538 | _slab_cache_create(cache, name, size, align, constructor, destructor, |
465 | flags); |
539 | flags); |
466 | return cache; |
540 | return cache; |
467 | } |
541 | } |
468 | 542 | ||
469 | /** |
543 | /** |
470 | * Reclaim space occupied by objects that are already free |
544 | * Reclaim space occupied by objects that are already free |
471 | * |
545 | * |
472 | * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing |
546 | * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing |
473 | * @return Number of freed pages |
547 | * @return Number of freed pages |
474 | */ |
548 | */ |
475 | static count_t _slab_reclaim(slab_cache_t *cache, int flags) |
549 | static count_t _slab_reclaim(slab_cache_t *cache, int flags) |
476 | { |
550 | { |
477 | int i; |
551 | int i; |
478 | slab_magazine_t *mag; |
552 | slab_magazine_t *mag; |
479 | link_t *cur; |
553 | link_t *cur; |
480 | count_t frames = 0; |
554 | count_t frames = 0; |
481 | 555 | ||
482 | if (cache->flags & SLAB_CACHE_NOMAGAZINE) |
556 | if (cache->flags & SLAB_CACHE_NOMAGAZINE) |
483 | return 0; /* Nothing to do */ |
557 | return 0; /* Nothing to do */ |
484 | 558 | ||
485 | /* First lock all cpu caches, then the complete cache lock */ |
559 | /* First lock all cpu caches, then the complete cache lock */ |
- | 560 | if (flags & SLAB_RECLAIM_ALL) { |
486 | for (i=0; i < config.cpu_count; i++) |
561 | for (i=0; i < config.cpu_count; i++) |
487 | spinlock_lock(&cache->mag_cache[i].lock); |
562 | spinlock_lock(&cache->mag_cache[i].lock); |
- | 563 | } |
488 | spinlock_lock(&cache->lock); |
564 | spinlock_lock(&cache->lock); |
489 | 565 | ||
490 | if (flags & SLAB_RECLAIM_ALL) { |
566 | if (flags & SLAB_RECLAIM_ALL) { |
491 | /* Aggressive memfree */ |
567 | /* Aggressive memfree */ |
492 | /* Destroy CPU magazines */ |
568 | /* Destroy CPU magazines */ |
493 | for (i=0; i<config.cpu_count; i++) { |
569 | for (i=0; i<config.cpu_count; i++) { |
494 | mag = cache->mag_cache[i].current; |
570 | mag = cache->mag_cache[i].current; |
495 | if (mag) |
571 | if (mag) |
496 | frames += magazine_destroy(cache, mag); |
572 | frames += magazine_destroy(cache, mag); |
497 | cache->mag_cache[i].current = NULL; |
573 | cache->mag_cache[i].current = NULL; |
498 | 574 | ||
499 | mag = cache->mag_cache[i].last; |
575 | mag = cache->mag_cache[i].last; |
500 | if (mag) |
576 | if (mag) |
501 | frames += magazine_destroy(cache, mag); |
577 | frames += magazine_destroy(cache, mag); |
502 | cache->mag_cache[i].last = NULL; |
578 | cache->mag_cache[i].last = NULL; |
503 | } |
579 | } |
504 | } |
580 | } |
505 | /* Destroy full magazines */ |
581 | /* Destroy full magazines */ |
506 | cur=cache->magazines.prev; |
582 | cur=cache->magazines.prev; |
507 | 583 | ||
508 | while (cur != &cache->magazines) { |
584 | while (cur != &cache->magazines) { |
509 | mag = list_get_instance(cur, slab_magazine_t, link); |
585 | mag = list_get_instance(cur, slab_magazine_t, link); |
510 | 586 | ||
511 | cur = cur->prev; |
587 | cur = cur->prev; |
512 | list_remove(&mag->link); |
588 | list_remove(&mag->link); |
513 | frames += magazine_destroy(cache,mag); |
589 | frames += magazine_destroy(cache,mag); |
514 | /* If we do not do full reclaim, break |
590 | /* If we do not do full reclaim, break |
515 | * as soon as something is freed */ |
591 | * as soon as something is freed */ |
516 | if (!(flags & SLAB_RECLAIM_ALL) && frames) |
592 | if (!(flags & SLAB_RECLAIM_ALL) && frames) |
517 | break; |
593 | break; |
518 | } |
594 | } |
519 | 595 | ||
520 | spinlock_unlock(&cache->lock); |
596 | spinlock_unlock(&cache->lock); |
- | 597 | if (flags & SLAB_RECLAIM_ALL) { |
521 | for (i=0; i < config.cpu_count; i++) |
598 | for (i=0; i < config.cpu_count; i++) |
522 | spinlock_unlock(&cache->mag_cache[i].lock); |
599 | spinlock_unlock(&cache->mag_cache[i].lock); |
- | 600 | } |
523 | 601 | ||
524 | return frames; |
602 | return frames; |
525 | } |
603 | } |
526 | 604 | ||
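To connect this with the header comment: the frame allocator (which lives outside this file) is expected to drive reclaim in two passes. A hedged sketch of such a caller, assuming the light pass simply passes no flags:

```c
/* Hypothetical call site in the frame allocator, not part of this file. */
count_t freed;

freed = slab_reclaim(0);			/* light reclaim first */
if (!freed)
	freed = slab_reclaim(SLAB_RECLAIM_ALL);	/* then the brutal one */
```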
527 | /** Check that there are no slabs and remove cache from system */ |
605 | /** Check that there are no slabs and remove cache from system */ |
528 | void slab_cache_destroy(slab_cache_t *cache) |
606 | void slab_cache_destroy(slab_cache_t *cache) |
529 | { |
607 | { |
530 | /* Do not lock anything, we assume the software is correct and |
608 | /* Do not lock anything, we assume the software is correct and |
531 | * does not touch the cache when it decides to destroy it */ |
609 | * does not touch the cache when it decides to destroy it */ |
532 | 610 | ||
533 | /* Destroy all magazines */ |
611 | /* Destroy all magazines */ |
534 | _slab_reclaim(cache, SLAB_RECLAIM_ALL); |
612 | _slab_reclaim(cache, SLAB_RECLAIM_ALL); |
535 | 613 | ||
536 | /* All slabs must be empty */ |
614 | /* All slabs must be empty */ |
537 | if (!list_empty(&cache->full_slabs) \ |
615 | if (!list_empty(&cache->full_slabs) \ |
538 | || !list_empty(&cache->partial_slabs)) |
616 | || !list_empty(&cache->partial_slabs)) |
539 | panic("Destroying cache that is not empty."); |
617 | panic("Destroying cache that is not empty."); |
540 | 618 | ||
541 | spinlock_lock(&slab_cache_lock); |
619 | spinlock_lock(&slab_cache_lock); |
542 | list_remove(&cache->link); |
620 | list_remove(&cache->link); |
543 | spinlock_unlock(&slab_cache_lock); |
621 | spinlock_unlock(&slab_cache_lock); |
544 | 622 | ||
545 | free(cache); |
623 | slab_free(&slab_cache_cache, cache); |
546 | } |
624 | } |
547 | 625 | ||
548 | /** Allocate a new object from the cache - if no flags are given, this |
626 | /** Allocate a new object from the cache - if no flags are given, this |
549 | always returns memory */ |
627 | always returns memory */ |
550 | void * slab_alloc(slab_cache_t *cache, int flags) |
628 | void * slab_alloc(slab_cache_t *cache, int flags) |
551 | { |
629 | { |
552 | ipl_t ipl; |
630 | ipl_t ipl; |
553 | void *result = NULL; |
631 | void *result = NULL; |
554 | 632 | ||
555 | /* Disable interrupts to avoid deadlocks with interrupt handlers */ |
633 | /* Disable interrupts to avoid deadlocks with interrupt handlers */ |
556 | ipl = interrupts_disable(); |
634 | ipl = interrupts_disable(); |
557 | 635 | ||
558 | if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) |
636 | if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) |
559 | result = magazine_obj_get(cache); |
637 | result = magazine_obj_get(cache); |
560 | 638 | ||
561 | if (!result) { |
639 | if (!result) { |
562 | spinlock_lock(&cache->lock); |
640 | spinlock_lock(&cache->lock); |
563 | result = slab_obj_create(cache, flags); |
641 | result = slab_obj_create(cache, flags); |
564 | spinlock_unlock(&cache->lock); |
642 | spinlock_unlock(&cache->lock); |
565 | } |
643 | } |
566 | 644 | ||
567 | if (result) |
- | |
568 | atomic_inc(&cache->allocated_objs); |
- | |
569 | - | ||
570 | interrupts_restore(ipl); |
645 | interrupts_restore(ipl); |
571 | 646 | ||
- | 647 | if (result) |
- | 648 | atomic_inc(&cache->allocated_objs); |
572 | 649 | ||
573 | return result; |
650 | return result; |
574 | } |
651 | } |
575 | 652 | ||
576 | /** Return object to cache */ |
653 | /** Return object to cache */ |
577 | void slab_free(slab_cache_t *cache, void *obj) |
654 | void slab_free(slab_cache_t *cache, void *obj) |
578 | { |
655 | { |
579 | ipl_t ipl; |
656 | ipl_t ipl; |
580 | 657 | ||
581 | ipl = interrupts_disable(); |
658 | ipl = interrupts_disable(); |
582 | 659 | ||
583 | if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \ |
660 | if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \ |
584 | || magazine_obj_put(cache, obj)) { |
661 | || magazine_obj_put(cache, obj)) { |
585 | 662 | ||
586 | spinlock_lock(&cache->lock); |
663 | spinlock_lock(&cache->lock); |
587 | slab_obj_destroy(cache, obj, NULL); |
664 | slab_obj_destroy(cache, obj, NULL); |
588 | spinlock_unlock(&cache->lock); |
665 | spinlock_unlock(&cache->lock); |
589 | } |
666 | } |
590 | atomic_dec(&cache->allocated_objs); |
- | |
591 | interrupts_restore(ipl); |
667 | interrupts_restore(ipl); |
- | 668 | atomic_dec(&cache->allocated_objs); |
592 | } |
669 | } |
593 | 670 | ||
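One note on the flags parameter of slab_alloc(): as in make_empty_current_mag() above, the flags are ultimately forwarded to frame_alloc(), so a caller that must not block can ask for a failable allocation. A sketch (the flag semantics are those of frame_alloc(); the fallback is hypothetical):

```c
void *obj = slab_alloc(cache, FRAME_ATOMIC);
if (!obj) {
	/* No memory available without sleeping - back off and retry later. */
}
```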
594 | /* Go through all caches and reclaim what is possible */ |
671 | /* Go through all caches and reclaim what is possible */ |
595 | count_t slab_reclaim(int flags) |
672 | count_t slab_reclaim(int flags) |
596 | { |
673 | { |
597 | slab_cache_t *cache; |
674 | slab_cache_t *cache; |
598 | link_t *cur; |
675 | link_t *cur; |
599 | count_t frames = 0; |
676 | count_t frames = 0; |
600 | 677 | ||
601 | spinlock_lock(&slab_cache_lock); |
678 | spinlock_lock(&slab_cache_lock); |
602 | 679 | ||
603 | for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) { |
680 | for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) { |
604 | cache = list_get_instance(cur, slab_cache_t, link); |
681 | cache = list_get_instance(cur, slab_cache_t, link); |
605 | frames += _slab_reclaim(cache, flags); |
682 | frames += _slab_reclaim(cache, flags); |
606 | } |
683 | } |
607 | 684 | ||
608 | spinlock_unlock(&slab_cache_lock); |
685 | spinlock_unlock(&slab_cache_lock); |
609 | 686 | ||
610 | return frames; |
687 | return frames; |
611 | } |
688 | } |
612 | 689 | ||
613 | 690 | ||
614 | /* Print the list of slab caches */ |
691 | /* Print the list of slab caches */ |
615 | void slab_print_list(void) |
692 | void slab_print_list(void) |
616 | { |
693 | { |
617 | slab_cache_t *cache; |
694 | slab_cache_t *cache; |
618 | link_t *cur; |
695 | link_t *cur; |
619 | 696 | ||
620 | spinlock_lock(&slab_cache_lock); |
697 | spinlock_lock(&slab_cache_lock); |
621 | printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n"); |
698 | printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n"); |
622 | for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) { |
699 | for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) { |
623 | cache = list_get_instance(cur, slab_cache_t, link); |
700 | cache = list_get_instance(cur, slab_cache_t, link); |
624 | printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size, |
701 | printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size, |
625 | (1 << cache->order), cache->objects, |
702 | (1 << cache->order), cache->objects, |
626 | atomic_get(&cache->allocated_slabs), |
703 | atomic_get(&cache->allocated_slabs), |
627 | atomic_get(&cache->cached_objs), |
704 | atomic_get(&cache->cached_objs), |
628 | atomic_get(&cache->allocated_objs), |
705 | atomic_get(&cache->allocated_objs), |
629 | cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out"); |
706 | cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out"); |
630 | } |
707 | } |
631 | spinlock_unlock(&slab_cache_lock); |
708 | spinlock_unlock(&slab_cache_lock); |
632 | } |
709 | } |
633 | 710 | ||
634 | void slab_cache_init(void) |
711 | void slab_cache_init(void) |
635 | { |
712 | { |
636 | /* Initialize magazine cache */ |
713 | /* Initialize magazine cache */ |
637 | _slab_cache_create(&mag_cache, |
714 | _slab_cache_create(&mag_cache, |
638 | "slab_magazine", |
715 | "slab_magazine", |
639 | sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*), |
716 | sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*), |
640 | sizeof(__address), |
717 | sizeof(__address), |
641 | NULL, NULL, |
718 | NULL, NULL, |
- | 719 | SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); |
- | 720 | /* Initialize slab_cache cache */ |
- | 721 | _slab_cache_create(&slab_cache_cache, |
- | 722 | "slab_cache", |
- | 723 | sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]), |
- | 724 | sizeof(__address), |
- | 725 | NULL, NULL, |
- | 726 | SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE); |
- | 727 | /* Initialize external slab cache */ |
- | 728 | slab_extern_cache = slab_cache_create("slab_extern", |
- | 729 | sizeof(slab_t), |
- | 730 | 0, NULL, NULL, |
642 | SLAB_CACHE_NOMAGAZINE); |
731 | SLAB_CACHE_SLINSIDE); |
643 | 732 | ||
644 | /* Initialize structures for malloc */ |
733 | /* Initialize structures for malloc */ |
645 | } |
734 | } |
646 | 735 |