/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB
 * allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * - empty SLABs are deallocated immediately
 *   (in Linux they are kept in a linked list, in Solaris ???)
 * - empty magazines are deallocated when not needed
 *   (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy
 * to add:
 * - cache coloring
 * - dynamic magazine growing (different magazine sizes are already
 *   supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first looked up in the
 * CPU-bound magazine. If it is not found there, it is allocated from
 * a CPU-shared SLAB - if a partially full one is found, it is used,
 * otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound
 * magazine. If there is no such magazine, a new one is allocated (if
 * that fails, the object is deallocated into the SLAB). If the magazine
 * is full, it is put onto the CPU-shared list of magazines and a new
 * one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially
 * full slabs. Empty SLABs are freed immediately (thrashing is avoided
 * because of the magazines).
 *
 * The SLAB information structure is kept inside the data area, if
 * possible. A cache can be marked to not use magazines. This is used
 * only for SLAB-related caches, to avoid deadlocks and infinite
 * recursion (the SLAB allocator uses itself for allocating all its
 * control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it.
 * When the frame allocator fails to allocate a frame, it calls
 * slab_reclaim(). It tries 'light reclaim' first, then brutal reclaim.
 * The light reclaim releases slabs from the CPU-shared magazine list,
 * until at least 1 slab is deallocated in each cache (this algorithm
 * should probably change). The brutal reclaim removes all cached
 * objects, even from CPU-bound magazines.
 *
 * TODO: For better CPU scaling, the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention for the single
 * per-system magazine cache.
 *
 */
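
/*
 * Example use of the public interface (an illustrative sketch; the
 * 'myobj_t' type and cache name are hypothetical):
 *
 *	static slab_cache_t *myobj_cache;
 *	myobj_t *obj;
 *
 *	myobj_cache = slab_cache_create("myobj_t", sizeof(myobj_t), 0,
 *					NULL, NULL, 0);
 *	obj = slab_alloc(myobj_cache, 0);
 *	...
 *	slab_free(myobj_cache, obj);
 */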

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] = {
	"malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< List of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< The index of the next available item */
} slab_t;
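
/*
 * The free objects inside a slab form an implicit index-linked free
 * list: the first word of every free object holds the index of the
 * next free object, and slab->nextavail holds the index of the head.
 * In a fresh slab, object i stores i+1, so objects are handed out in
 * address order. slab_obj_create() pops the head of this list and
 * slab_obj_destroy() pushes the freed object back onto it.
 */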

/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	zone_t *zone = NULL;
	int status;
	frame_t *frame;

	data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free((__address)data);
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	/* TODO: some better way of accessing the frame */
	for (i=0; i < (1 << cache->order); i++) {
		frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
		frame->parent = slab;
	}

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	for (i=0; i<cache->objects;i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free((__address)slab->start);
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	frame_t *frame;

	frame = frame_addr2frame((__address)obj);
	return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * Assume the cache->lock is held.
 *
 * @param slab The slab of the object, if known directly by the caller; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	count_t frames = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		/* Avoid deadlock */
		spinlock_unlock(&cache->lock);
		frames = slab_space_free(cache, slab);
		spinlock_lock(&cache->lock);
	}

	return frames;
}

/**
 * Take new object from slab or create new if needed
 *
 * Assume cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and need nothing other than frame_alloc
		 *   when they allocate; the recursion is therefore
		 *   at most 1 level deep
		 */
		spinlock_unlock(&cache->lock);
		slab = slab_space_alloc(cache, flags);
		spinlock_lock(&cache->lock);
		if (!slab) {
			return NULL;
		}
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;
	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * Assume cache->lock is held
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i=0;i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from magazine list */
	spinlock_lock(&cache->lock);
	if (list_empty(&cache->magazines)) {
		spinlock_unlock(&cache->lock);
		return NULL;
	}
	newmag = list_get_instance(cache->magazines.next,
				   slab_magazine_t,
				   link);
	list_remove(&newmag->link);
	spinlock_unlock(&cache->lock);

	if (lastmag)
		slab_free(&mag_cache, lastmag);
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}
384 | 392 | ||
385 | /** |
393 | /** |
386 | * Assure that the current magazine is empty, return pointer to it, or NULL if |
394 | * Assure that the current magazine is empty, return pointer to it, or NULL if |
387 | * no empty magazine is available and cannot be allocated |
395 | * no empty magazine is available and cannot be allocated |
388 | * |
396 | * |
389 | * Assume mag_cache[CPU->id].lock is held |
397 | * Assume mag_cache[CPU->id].lock is held |
390 | * |
398 | * |
391 | * We have 2 magazines bound to processor. |
399 | * We have 2 magazines bound to processor. |
392 | * First try the current. |
400 | * First try the current. |
393 | * If full, try the last. |
401 | * If full, try the last. |
394 | * If full, put to magazines list. |
402 | * If full, put to magazines list. |
395 | * allocate new, exchange last & current |
403 | * allocate new, exchange last & current |
396 | * |
404 | * |
397 | */ |
405 | */ |
398 | static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache) |
406 | static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache) |
399 | { |
407 | { |
400 | slab_magazine_t *cmag,*lastmag,*newmag; |
408 | slab_magazine_t *cmag,*lastmag,*newmag; |
401 | 409 | ||
402 | cmag = cache->mag_cache[CPU->id].current; |
410 | cmag = cache->mag_cache[CPU->id].current; |
403 | lastmag = cache->mag_cache[CPU->id].last; |
411 | lastmag = cache->mag_cache[CPU->id].last; |
404 | 412 | ||
405 | if (cmag) { |
413 | if (cmag) { |
406 | if (cmag->busy < cmag->size) |
414 | if (cmag->busy < cmag->size) |
407 | return cmag; |
415 | return cmag; |
408 | if (lastmag && lastmag->busy < lastmag->size) { |
416 | if (lastmag && lastmag->busy < lastmag->size) { |
409 | cache->mag_cache[CPU->id].last = cmag; |
417 | cache->mag_cache[CPU->id].last = cmag; |
410 | cache->mag_cache[CPU->id].current = lastmag; |
418 | cache->mag_cache[CPU->id].current = lastmag; |
411 | return lastmag; |
419 | return lastmag; |
412 | } |
420 | } |
413 | } |
421 | } |
414 | /* current | last are full | nonexistent, allocate new */ |
422 | /* current | last are full | nonexistent, allocate new */ |
415 | /* We do not want to sleep just because of caching */ |
423 | /* We do not want to sleep just because of caching */ |
416 | /* Especially we do not want reclaiming to start, as |
424 | /* Especially we do not want reclaiming to start, as |
417 | * this would deadlock */ |
425 | * this would deadlock */ |
418 | newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM); |
426 | newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM); |
419 | if (!newmag) |
427 | if (!newmag) |
420 | return NULL; |
428 | return NULL; |
421 | newmag->size = SLAB_MAG_SIZE; |
429 | newmag->size = SLAB_MAG_SIZE; |
422 | newmag->busy = 0; |
430 | newmag->busy = 0; |
423 | 431 | ||
424 | /* Flush last to magazine list */ |
432 | /* Flush last to magazine list */ |
425 | if (lastmag) { |
433 | if (lastmag) { |
426 | spinlock_lock(&cache->lock); |
434 | spinlock_lock(&cache->lock); |
427 | list_prepend(&lastmag->link, &cache->magazines); |
435 | list_prepend(&lastmag->link, &cache->magazines); |
428 | spinlock_unlock(&cache->lock); |
436 | spinlock_unlock(&cache->lock); |
429 | } |
437 | } |
430 | /* Move current as last, save new as current */ |
438 | /* Move current as last, save new as current */ |
431 | cache->mag_cache[CPU->id].last = cmag; |
439 | cache->mag_cache[CPU->id].last = cmag; |
432 | cache->mag_cache[CPU->id].current = newmag; |
440 | cache->mag_cache[CPU->id].current = newmag; |
433 | 441 | ||
434 | return newmag; |
442 | return newmag; |
435 | } |
443 | } |

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return the number of objects that fit into one slab of the cache */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects*cache->size;
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   void (*destructor)(void *obj),
		   int flags)
{
	int i;
	int pages;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->lock, "cachelock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i=0; i < config.cpu_count; i++) {
			memsetb((__address)&cache->mag_cache[i],
				sizeof(cache->mag_cache[i]), 0);
			spinlock_initialize(&cache->mag_cache[i].lock,
					    "cpucachelock");
		}
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 void (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	link_t *cur;
	count_t frames = 0;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* First lock all cpu caches, then the complete cache lock */
	if (flags & SLAB_RECLAIM_ALL) {
		for (i=0; i < config.cpu_count; i++)
			spinlock_lock(&cache->mag_cache[i].lock);
	}
	spinlock_lock(&cache->lock);

	if (flags & SLAB_RECLAIM_ALL) {
		/* Aggressive memfree */
		/* Destroy CPU magazines */
		for (i=0; i<config.cpu_count; i++) {
			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;
		}
	}
	/* Destroy full magazines */
	cur = cache->magazines.prev;

	while (cur != &cache->magazines) {
		mag = list_get_instance(cur, slab_magazine_t, link);

		cur = cur->prev;
		list_remove(&mag->link);
		frames += magazine_destroy(cache, mag);
		/* If we do not do full reclaim, break
		 * as soon as something is freed */
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	spinlock_unlock(&cache->lock);
	/* We can release the cache locks now */
	if (flags & SLAB_RECLAIM_ALL) {
		for (i=0; i < config.cpu_count; i++)
			spinlock_unlock(&cache->mag_cache[i].lock);
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	spinlock_lock(&slab_cache_lock);
	list_remove(&cache->link);
	spinlock_unlock(&slab_cache_lock);

	slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);

	if (!result) {
		spinlock_lock(&cache->lock);
		result = slab_obj_create(cache, flags);
		spinlock_unlock(&cache->lock);
	}

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
	    || magazine_obj_put(cache, obj)) {
		spinlock_lock(&cache->lock);
		slab_obj_destroy(cache, obj, slab);
		spinlock_unlock(&cache->lock);
	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;

	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE);

	/* Initialize structures for malloc */
	for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, 0);
	}
}

/**************************************/
/* kalloc/kfree functions */
void * kalloc(unsigned int size, int flags)
{
	int idx;

	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}
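
/*
 * Size-class example (assuming SLAB_MIN_MALLOC_W is 3, consistent with
 * the first 'malloc-8' cache above): kalloc(100, 0) computes
 * idx = fnzb(99) - 3 + 1 = 4 and thus allocates from the 'malloc-128'
 * cache, i.e. the request is rounded up to the next power of two.
 */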

void kfree(void *obj)
{
	slab_t *slab = obj2slab(obj);

	_slab_free(slab->cache, obj, slab);
}