Rev 1757 → Rev 1760 (lines removed in Rev 1760 are marked '-', added lines '+')
/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is allocated, the allocator first checks whether it is
 * available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if this fails,
 * the object is deallocated directly into a slab). If the magazine is full,
 * it is put onto the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody allocates/deallocates one item right at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are immediately freed (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked so that it does not use magazines. This is used
 * only for slab-related caches, to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself for allocating all its control structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * @todo
 * For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if the cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice per-CPU
 * buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention on the single per-system
 * magazine cache.
 *
 * @todo
 * It might be good to add lock granularity even at the slab level;
 * we could then try_spinlock over all partial slabs and thus improve
 * scalability even at the slab level.
 */
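
/*
 * Illustrative use of the interface defined below (a sketch only; the cache
 * name and foo_t are hypothetical):
 *
 *    slab_cache_t *foo_cache;
 *    foo_t *foo;
 *
 *    foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
 *                                  NULL, NULL, 0);
 *    foo = slab_alloc(foo_cache, 0);
 *    ...
 *    slab_free(foo_cache, foo);
 *    slab_cache_destroy(foo_cache);
 */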

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-cpu cache, so do not make it static.
 * Using the slab allocator for its own internal slab structures will not
 * deadlock, because all slab structures are 'small' - the control structures
 * of their caches do not require further allocation.
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] =  {
    "malloc-16","malloc-32","malloc-64","malloc-128",
    "malloc-256","malloc-512","malloc-1K","malloc-2K",
    "malloc-4K","malloc-8K","malloc-16K","malloc-32K",
    "malloc-64K","malloc-128K","malloc-256K"
};

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache;    /**< Pointer to parent cache */
    link_t link;            /**< List of full/partial slabs */
    void *start;            /**< Start address of first available item */
    count_t available;      /**< Count of available items in this slab */
    index_t nextavail;      /**< The index of next available item */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions */

/**
 * Allocate frames for slab space and initialize them
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    int status;
-   pfn_t pfn;
    int zone=0;

-   pfn = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
-   data = (void *) PA2KA(PFN2ADDR(pfn));
+   data = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
    if (status != FRAME_OK) {
        return NULL;
    }
    if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
-           frame_free(ADDR2PFN(KA2PA(data)));
+           frame_free(KA2PA(data));
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    for (i=0; i < (1 << cache->order); i++)
-       frame_set_parent(pfn+i, slab, zone);
+       frame_set_parent(ADDR2PFN(KA2PA(data))+i, slab, zone);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

    /* Build the embedded free list: each free item stores the index of
     * the next free item */
    for (i=0; i<cache->objects;i++)
        *((int *) (slab->start + i*cache->size)) = i+1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
-   frame_free(ADDR2PFN(KA2PA(slab->start)));
+   frame_free(KA2PA(slab->start));
    if (! (cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    return (slab_t *)frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* Slab functions */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known by the caller; NULL otherwise
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
    int freed = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    if (cache->destructor)
        freed = cache->destructor(obj);

    spinlock_lock(&cache->slablock);
    ASSERT(slab->available < cache->objects);

    /* Link the object back into the slab's embedded free list */
    *((int *)obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start)/cache->size;
    slab->available++;

    /* Move it to correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        spinlock_unlock(&cache->slablock);

        return freed + slab_space_free(cache, slab);

    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    spinlock_unlock(&cache->slablock);
    return freed;
}

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the slab control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating,
         *   that's why we should get recursion at most 1-level deep
         */
        spinlock_unlock(&cache->slablock);
        slab = slab_space_alloc(cache, flags);
        if (!slab)
            return NULL;
        spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                                 slab_t,
                                 link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *)obj);
    slab->available--;

    if (! slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);

    if (cache->constructor && cache->constructor(obj, flags)) {
        /* Bad, bad, construction failed */
        slab_obj_destroy(cache, obj, slab);
        return NULL;
    }
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Find a full magazine in the cache, take it from the list
 * and return it
 *
 * @param first If true, return the first magazine on the list, else the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
                                            int first)
{
    slab_magazine_t *mag = NULL;
    link_t *cur;

    spinlock_lock(&cache->maglock);
    if (!list_empty(&cache->magazines)) {
        if (first)
            cur = cache->magazines.next;
        else
            cur = cache->magazines.prev;
        mag = list_get_instance(cur, slab_magazine_t, link);
        list_remove(&mag->link);
        atomic_dec(&cache->magazine_counter);
    }
    spinlock_unlock(&cache->maglock);
    return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
    spinlock_lock(&cache->maglock);

    list_prepend(&mag->link, &cache->magazines);
    atomic_inc(&cache->magazine_counter);

    spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i=0;i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from magazine list */
    newmag = get_mag_from_cache(cache, 1);
    if (!newmag)
        return NULL;

    if (lastmag)
        magazine_destroy(cache, lastmag);

    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    if (!CPU)
        return NULL;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Assure that the current magazine has free space; return a pointer to it,
 * or NULL if no such magazine is available and a new one cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to the processor.
 * First try the current.
 * If it is full, try the last.
 * If both are full, flush the last to the magazines list,
 * allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag,*lastmag,*newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current | last are full | nonexistent, allocate new */
    /* We do not want to sleep just because of caching */
    /* Especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to magazine list */
    if (lastmag)
        put_mag_to_cache(cache, lastmag);

    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    if (!CPU)
        return -1;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}


/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit into one slab of the cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects*cache->size;
}
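
/*
 * Worked example for comp_objects()/badness() (illustrative only; assumes
 * 4 KiB pages, order 0 and sizeof(slab_t) == 32, with the slab_t kept
 * inside the slab):
 *
 *    cache->size = 480:
 *        comp_objects() = (4096 - 32) / 480 = 8
 *        badness()      = (4096 - 32) - 8*480 = 224
 *
 * _slab_cache_create() below keeps increasing cache->order until badness()
 * drops to SLAB_MAX_BADNESS(cache) or below.
 */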

/**
 * Initialize mag_cache structure in slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
    int i;

    ASSERT(_slab_initialized >= 2);

    cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
    for (i=0; i < config.cpu_count; i++) {
        memsetb((__address)&cache->mag_cache[i],
                sizeof(cache->mag_cache[i]), 0);
        spinlock_initialize(&cache->mag_cache[i].lock,
                            "slab_maglock_cpu");
    }
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   char *name,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   int (*destructor)(void *obj),
                   int flags)
{
    int pages;
    ipl_t ipl;

    memsetb((__address)cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->slablock, "slab_lock");
    spinlock_initialize(&cache->maglock, "slab_maglock");
    if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
        make_magcache(cache);

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    pages = SIZE2FRAMES(cache->size);
    /* We need the 2^order >= pages */
    if (pages == 1)
        cache->order = 0;
    else
        cache->order = fnzb(pages-1)+1;

    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Add cache to cache list */
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 int (*destructor)(void *obj),
                                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
                       flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    count_t frames = 0;
    int magcount;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* We count up to original magazine count to avoid
     * endless loop
     */
    magcount = atomic_get(&cache->magazine_counter);
    while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
        frames += magazine_destroy(cache,mag);
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    if (flags & SLAB_RECLAIM_ALL) {
        /* Free cpu-bound magazines */
        /* Destroy CPU magazines */
        for (i=0; i<config.cpu_count; i++) {
            spinlock_lock(&cache->mag_cache[i].lock);

            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;

            spinlock_unlock(&cache->mag_cache[i].lock);
        }
    }

    return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
    ipl_t ipl;

    /* First remove cache from link, so that we don't need
     * to disable interrupts later
     */
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_remove(&cache->link);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);

    /* Do not lock anything, we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs)
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        free(cache->mag_cache);
    slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags are given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        result = magazine_obj_get(cache);
    }
    if (!result)
        result = slab_obj_create(cache, flags);

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
        || magazine_obj_put(cache, obj)) {

        slab_obj_destroy(cache, obj, slab);

    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache,obj,NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    /* TODO: Add assert, that interrupts are disabled, otherwise
     * memory allocation from interrupts can deadlock.
     */

    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
    printf("slab name\t Osize\t Pages\t Obj/pg\t Slabs\t Cached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next;cur!=&slab_cache_list; cur=cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

void slab_cache_init(void)
{
    int i, size;

    /* Initialize magazine cache */
    _slab_cache_create(&mag_cache,
                       "slab_magazine",
                       sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
                       "slab_cache",
                       sizeof(slab_cache_cache),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                                          sizeof(slab_t),
                                          0, NULL, NULL,
                                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

    /* Initialize structures for malloc */
    for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                                             size, 0,
                                             NULL,NULL, SLAB_CACHE_MAGDEFERRED);
    }
#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors.
 * Allocate a slab for the cpucache and enable it on all existing
 * caches that are SLAB_CACHE_MAGDEFERRED
 */
void slab_enable_cpucache(void)
{
    link_t *cur;
    slab_cache_t *s;

#ifdef CONFIG_DEBUG
    _slab_initialized = 2;
#endif

    spinlock_lock(&slab_cache_lock);

    for (cur=slab_cache_list.next; cur != &slab_cache_list;cur=cur->next){
        s = list_get_instance(cur, slab_cache_t, link);
        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
            continue;
        make_magcache(s);
        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
    }

    spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* malloc/free functions */
void * malloc(unsigned int size, int flags)
{
    int idx;

    ASSERT(_slab_initialized);
    ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

    /* Index of the smallest power-of-two bucket that can hold 'size' */
    idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}
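
/*
 * Example of the bucket mapping above (illustrative only; assumes the
 * minimum width implied by malloc_names[], i.e. SLAB_MIN_MALLOC_W == 4):
 *
 *    size = 100:  fnzb(99) == 6, idx = 6 - 4 + 1 = 3  ->  "malloc-128"
 *    size = 64:   fnzb(63) == 5, idx = 5 - 4 + 1 = 2  ->  "malloc-64"
 */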

void free(void *obj)
{
    slab_t *slab;

    if (!obj) return;

    slab = obj2slab(obj);
    _slab_free(slab->cache, obj, slab);
}

/** @}
 */