/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/** @addtogroup genericmm
 * @{
 */

/**
 * @file
 * @brief Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first checked whether it is
 * available in a CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into a CPU-bound magazine.
 * If there is no such magazine, a new one is allocated (if this fails,
 * the object is deallocated into its slab). If the magazine is full, it is
 * put onto the CPU-shared list of magazines and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines in order to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked as one that should not use magazines. This is used
 * only for slab-related caches to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself for allocating all its control structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list, until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * TODO:@n
 * For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if a cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice per-CPU
 * buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention on the single per-system
 * magazine cache.
 *
 * @li it might be good to add granularity of locks even at the slab level;
 *     we could then try_spinlock over all partial slabs and thus improve
 *     scalability even at the slab level
 */

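/*
 * Illustrative usage sketch (not part of the allocator itself): how a client
 * subsystem might use the interface defined below. The foo_t type is
 * hypothetical; no constructor/destructor and no special flags are used.
 *
 *     static slab_cache_t *foo_cache;
 *
 *     foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
 *                                   NULL, NULL, 0);
 *     foo_t *foo = slab_alloc(foo_cache, 0);
 *     ... use foo ...
 *     slab_free(foo_cache, foo);
 *     slab_cache_destroy(foo_cache);
 */
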
#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using the slab allocator for internal slab structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1];
char *malloc_names[] = {
    "malloc-16", "malloc-32", "malloc-64", "malloc-128",
    "malloc-256", "malloc-512", "malloc-1K", "malloc-2K",
    "malloc-4K", "malloc-8K", "malloc-16K", "malloc-32K",
    "malloc-64K", "malloc-128K", "malloc-256K"
};

/** Slab descriptor */
typedef struct {
    slab_cache_t *cache;  /**< Pointer to parent cache */
    link_t link;          /**< List of full/partial slabs */
    void *start;          /**< Start address of first available item */
    count_t available;    /**< Count of available items in this slab */
    index_t nextavail;    /**< The index of the next available item */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions */

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
    void *data;
    slab_t *slab;
    size_t fsize;
    int i;
    int status;
    pfn_t pfn;
    int zone = 0;

    pfn = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
    data = (void *) PA2KA(PFN2ADDR(pfn));
    if (status != FRAME_OK) {
        return NULL;
    }
    if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
        slab = slab_alloc(slab_extern_cache, flags);
        if (!slab) {
            frame_free(ADDR2PFN(KA2PA(data)));
            return NULL;
        }
    } else {
        fsize = (PAGE_SIZE << cache->order);
        slab = data + fsize - sizeof(*slab);
    }

    /* Fill in slab structures */
    for (i = 0; i < (1 << cache->order); i++)
        frame_set_parent(pfn + i, slab, zone);

    slab->start = data;
    slab->available = cache->objects;
    slab->nextavail = 0;
    slab->cache = cache;

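    /*
     * Thread an implicit free list through the unused objects: each free
     * slot stores the index of the next free slot, starting at nextavail.
     */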
    for (i = 0; i < cache->objects; i++)
        *((int *) (slab->start + i * cache->size)) = i + 1;

    atomic_inc(&cache->allocated_slabs);
    return slab;
}

/**
 * Deallocate space associated with slab
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
    frame_free(ADDR2PFN(KA2PA(slab->start)));
    if (!(cache->flags & SLAB_CACHE_SLINSIDE))
        slab_free(slab_extern_cache, slab);

    atomic_dec(&cache->allocated_slabs);

    return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
    return (slab_t *) frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* Slab functions */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known directly by the caller;
 *             otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
                                slab_t *slab)
{
    int freed = 0;

    if (!slab)
        slab = obj2slab(obj);

    ASSERT(slab->cache == cache);

    if (cache->destructor)
        freed = cache->destructor(obj);

    spinlock_lock(&cache->slablock);
    ASSERT(slab->available < cache->objects);

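    /* Push the object back onto the slab's embedded free list. */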
    *((int *) obj) = slab->nextavail;
    slab->nextavail = (obj - slab->start) / cache->size;
    slab->available++;

    /* Move it to the correct list */
    if (slab->available == cache->objects) {
        /* Free associated memory */
        list_remove(&slab->link);
        spinlock_unlock(&cache->slablock);

        return freed + slab_space_free(cache, slab);

    } else if (slab->available == 1) {
        /* It was in full, move to partial */
        list_remove(&slab->link);
        list_prepend(&slab->link, &cache->partial_slabs);
    }
    spinlock_unlock(&cache->slablock);
    return freed;
}

/**
 * Take a new object from a slab or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
    slab_t *slab;
    void *obj;

    spinlock_lock(&cache->slablock);

    if (list_empty(&cache->partial_slabs)) {
        /* Allow recursion and reclaiming
         * - this should work, as the slab control structures
         *   are small and do not need to allocate with anything
         *   other than frame_alloc when they are allocating;
         *   that's why we should get recursion at most 1 level deep
         */
        spinlock_unlock(&cache->slablock);
        slab = slab_space_alloc(cache, flags);
        if (!slab)
            return NULL;
        spinlock_lock(&cache->slablock);
    } else {
        slab = list_get_instance(cache->partial_slabs.next,
                                 slab_t,
                                 link);
        list_remove(&slab->link);
    }
    obj = slab->start + slab->nextavail * cache->size;
    slab->nextavail = *((int *) obj);
    slab->available--;

    if (!slab->available)
        list_prepend(&slab->link, &cache->full_slabs);
    else
        list_prepend(&slab->link, &cache->partial_slabs);

    spinlock_unlock(&cache->slablock);

    if (cache->constructor && cache->constructor(obj, flags)) {
        /* Bad, bad, construction failed */
        slab_obj_destroy(cache, obj, slab);
        return NULL;
    }
    return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Finds a full magazine in the cache, takes it from the list
 * and returns it
 *
 * @param first If true, return the first magazine, else the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
                                            int first)
{
    slab_magazine_t *mag = NULL;
    link_t *cur;

    spinlock_lock(&cache->maglock);
    if (!list_empty(&cache->magazines)) {
        if (first)
            cur = cache->magazines.next;
        else
            cur = cache->magazines.prev;
        mag = list_get_instance(cur, slab_magazine_t, link);
        list_remove(&mag->link);
        atomic_dec(&cache->magazine_counter);
    }
    spinlock_unlock(&cache->maglock);
    return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
    spinlock_lock(&cache->maglock);

    list_prepend(&mag->link, &cache->magazines);
    atomic_inc(&cache->magazine_counter);

    spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in the magazine and free the memory associated with it
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
                                slab_magazine_t *mag)
{
    int i;
    count_t frames = 0;

    for (i = 0; i < mag->busy; i++) {
        frames += slab_obj_destroy(cache, mag->objs[i], NULL);
        atomic_dec(&cache->cached_objs);
    }

    slab_free(&mag_cache, mag);

    return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assumes the cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;
    if (cmag) { /* First try local CPU magazines */
        if (cmag->busy)
            return cmag;

        if (lastmag && lastmag->busy) {
            cache->mag_cache[CPU->id].current = lastmag;
            cache->mag_cache[CPU->id].last = cmag;
            return lastmag;
        }
    }
    /* Local magazines are empty, import one from the magazine list */
    newmag = get_mag_from_cache(cache, 1);
    if (!newmag)
        return NULL;

    if (lastmag)
        magazine_destroy(cache, lastmag);

    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;
    return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
    slab_magazine_t *mag;
    void *obj;

    if (!CPU)
        return NULL;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = get_full_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return NULL;
    }
    obj = mag->objs[--mag->busy];
    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_dec(&cache->cached_objs);

    return obj;
}

/**
 * Ensure that the current magazine has room for another object; return a
 * pointer to it, or NULL if no magazine with free space is available and
 * a new one cannot be allocated
 *
 * Assumes mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to the processor.
 * First try the current.
 * If full, try the last.
 * If full, put it onto the magazines list,
 * allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
    slab_magazine_t *cmag, *lastmag, *newmag;

    cmag = cache->mag_cache[CPU->id].current;
    lastmag = cache->mag_cache[CPU->id].last;

    if (cmag) {
        if (cmag->busy < cmag->size)
            return cmag;
        if (lastmag && lastmag->busy < lastmag->size) {
            cache->mag_cache[CPU->id].last = cmag;
            cache->mag_cache[CPU->id].current = lastmag;
            return lastmag;
        }
    }
    /* current | last are full | nonexistent, allocate new */
    /* We do not want to sleep just because of caching, */
    /* and especially we do not want reclaiming to start, as
     * this would deadlock */
    newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
    if (!newmag)
        return NULL;
    newmag->size = SLAB_MAG_SIZE;
    newmag->busy = 0;

    /* Flush last to the magazine list */
    if (lastmag)
        put_mag_to_cache(cache, lastmag);

    /* Move current as last, save new as current */
    cache->mag_cache[CPU->id].last = cmag;
    cache->mag_cache[CPU->id].current = newmag;

    return newmag;
}

/**
 * Put an object into a CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
    slab_magazine_t *mag;

    if (!CPU)
        return -1;

    spinlock_lock(&cache->mag_cache[CPU->id].lock);

    mag = make_empty_current_mag(cache);
    if (!mag) {
        spinlock_unlock(&cache->mag_cache[CPU->id].lock);
        return -1;
    }

    mag->objs[mag->busy++] = obj;

    spinlock_unlock(&cache->mag_cache[CPU->id].lock);
    atomic_inc(&cache->cached_objs);
    return 0;
}


/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit into a single slab of the cache */
static int comp_objects(slab_cache_t *cache)
{
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
    else
        return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static int badness(slab_cache_t *cache)
{
    int objects;
    int ssize;

    objects = comp_objects(cache);
    ssize = PAGE_SIZE << cache->order;
    if (cache->flags & SLAB_CACHE_SLINSIDE)
        ssize -= sizeof(slab_t);
    return ssize - objects * cache->size;
}

/**
 * Initialize the mag_cache structure in a slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
    int i;

    ASSERT(_slab_initialized >= 2);

    cache->mag_cache = malloc(sizeof(slab_mag_cache_t) * config.cpu_count, 0);
    for (i = 0; i < config.cpu_count; i++) {
        memsetb((__address) &cache->mag_cache[i],
                sizeof(cache->mag_cache[i]), 0);
        spinlock_initialize(&cache->mag_cache[i].lock,
                            "slab_maglock_cpu");
    }
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
                   char *name,
                   size_t size,
                   size_t align,
                   int (*constructor)(void *obj, int kmflag),
                   int (*destructor)(void *obj),
                   int flags)
{
    int pages;
    ipl_t ipl;

    memsetb((__address) cache, sizeof(*cache), 0);
    cache->name = name;

    if (align < sizeof(__native))
        align = sizeof(__native);
    size = ALIGN_UP(size, align);

    cache->size = size;

    cache->constructor = constructor;
    cache->destructor = destructor;
    cache->flags = flags;

    list_initialize(&cache->full_slabs);
    list_initialize(&cache->partial_slabs);
    list_initialize(&cache->magazines);
    spinlock_initialize(&cache->slablock, "slab_lock");
    spinlock_initialize(&cache->maglock, "slab_maglock");
    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        make_magcache(cache);

    /* Compute slab sizes, object counts in slabs etc. */
    if (cache->size < SLAB_INSIDE_SIZE)
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Minimum slab order */
    pages = SIZE2FRAMES(cache->size);
    /* We need 2^order >= pages */
    if (pages == 1)
        cache->order = 0;
    else
        cache->order = fnzb(pages - 1) + 1;

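    /*
     * Illustrative example: an object spanning 3 frames gives
     * fnzb(2) + 1 == 2, i.e. an order-2 (4-frame) slab. The loop below
     * then keeps growing the order while the space wasted per slab
     * exceeds SLAB_MAX_BADNESS().
     */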
    while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
        cache->order += 1;
    }
    cache->objects = comp_objects(cache);
    /* If the slab info fits in, put it inside */
    if (badness(cache) > sizeof(slab_t))
        cache->flags |= SLAB_CACHE_SLINSIDE;

    /* Add cache to the cache list */
    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_append(&cache->link, &slab_cache_list);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
                                 size_t size,
                                 size_t align,
                                 int (*constructor)(void *obj, int kmflag),
                                 int (*destructor)(void *obj),
                                 int flags)
{
    slab_cache_t *cache;

    cache = slab_alloc(&slab_cache_cache, 0);
    _slab_cache_create(cache, name, size, align, constructor, destructor,
                       flags);
    return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
    int i;
    slab_magazine_t *mag;
    count_t frames = 0;
    int magcount;

    if (cache->flags & SLAB_CACHE_NOMAGAZINE)
        return 0; /* Nothing to do */

    /* We count up to the original magazine count to avoid
     * an endless loop
     */
    magcount = atomic_get(&cache->magazine_counter);
    while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
        frames += magazine_destroy(cache, mag);
        if (!(flags & SLAB_RECLAIM_ALL) && frames)
            break;
    }

    if (flags & SLAB_RECLAIM_ALL) {
        /* Free cpu-bound magazines */
        /* Destroy CPU magazines */
        for (i = 0; i < config.cpu_count; i++) {
            spinlock_lock(&cache->mag_cache[i].lock);

            mag = cache->mag_cache[i].current;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].current = NULL;

            mag = cache->mag_cache[i].last;
            if (mag)
                frames += magazine_destroy(cache, mag);
            cache->mag_cache[i].last = NULL;

            spinlock_unlock(&cache->mag_cache[i].lock);
        }
    }

    return frames;
}

/** Check that there are no slabs and remove the cache from the system */
void slab_cache_destroy(slab_cache_t *cache)
{
    ipl_t ipl;

    /* First remove the cache from the cache list, so that we don't need
     * to disable interrupts later
     */

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);

    list_remove(&cache->link);

    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);

    /* Do not lock anything; we assume the software is correct and
     * does not touch the cache when it decides to destroy it */

    /* Destroy all magazines */
    _slab_reclaim(cache, SLAB_RECLAIM_ALL);

    /* All slabs must be empty */
    if (!list_empty(&cache->full_slabs) \
        || !list_empty(&cache->partial_slabs))
        panic("Destroying cache that is not empty.");

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
        free(cache->mag_cache);
    slab_free(&slab_cache_cache, cache);
}

/** Allocate a new object from cache - if no flags are given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
    ipl_t ipl;
    void *result = NULL;

    /* Disable interrupts to avoid deadlocks with interrupt handlers */
    ipl = interrupts_disable();

    if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
        result = magazine_obj_get(cache);
    }
    if (!result)
        result = slab_obj_create(cache, flags);

    interrupts_restore(ipl);

    if (result)
        atomic_inc(&cache->allocated_objs);

    return result;
}

/** Return an object to the cache, using the slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
    ipl_t ipl;

    ipl = interrupts_disable();

    if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
        || magazine_obj_put(cache, obj)) {

        slab_obj_destroy(cache, obj, slab);

    }
    interrupts_restore(ipl);
    atomic_dec(&cache->allocated_objs);
}

/** Return a slab object to the cache */
void slab_free(slab_cache_t *cache, void *obj)
{
    _slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
    slab_cache_t *cache;
    link_t *cur;
    count_t frames = 0;

    spinlock_lock(&slab_cache_lock);

    /* TODO: Add an assert that interrupts are disabled, otherwise
     * memory allocation from interrupts can deadlock.
     */

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        frames += _slab_reclaim(cache, flags);
    }

    spinlock_unlock(&slab_cache_lock);

    return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
    slab_cache_t *cache;
    link_t *cur;
    ipl_t ipl;

    ipl = interrupts_disable();
    spinlock_lock(&slab_cache_lock);
    printf("slab name\t Osize\t Pages\t Obj/pg\t Slabs\t Cached\tAllocobjs\tCtl\n");
    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        cache = list_get_instance(cur, slab_cache_t, link);
        printf("%s\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t\t%s\n", cache->name, cache->size,
               (1 << cache->order), cache->objects,
               atomic_get(&cache->allocated_slabs),
               atomic_get(&cache->cached_objs),
               atomic_get(&cache->allocated_objs),
               cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
    }
    spinlock_unlock(&slab_cache_lock);
    interrupts_restore(ipl);
}

void slab_cache_init(void)
{
    int i, size;

    /* Initialize the magazine cache */
    _slab_cache_create(&mag_cache,
                       "slab_magazine",
                       sizeof(slab_magazine_t) + SLAB_MAG_SIZE * sizeof(void *),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize the slab_cache cache */
    _slab_cache_create(&slab_cache_cache,
                       "slab_cache",
                       sizeof(slab_cache_cache),
                       sizeof(__address),
                       NULL, NULL,
                       SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
    /* Initialize the external slab cache */
    slab_extern_cache = slab_cache_create("slab_extern",
                                          sizeof(slab_t),
                                          0, NULL, NULL,
                                          SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

    /* Initialize structures for malloc */
    for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
         i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
         i++, size <<= 1) {
        malloc_caches[i] = slab_cache_create(malloc_names[i],
                                             size, 0,
                                             NULL, NULL, SLAB_CACHE_MAGDEFERRED);
    }
#ifdef CONFIG_DEBUG
    _slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. Allocate a slab for the cpucache and enable it on all
 * existing caches that are SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
    link_t *cur;
    slab_cache_t *s;

#ifdef CONFIG_DEBUG
    _slab_initialized = 2;
#endif

    spinlock_lock(&slab_cache_lock);

    for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
        s = list_get_instance(cur, slab_cache_t, link);
        if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
            continue;
        make_magcache(s);
        s->flags &= ~SLAB_CACHE_MAGDEFERRED;
    }

    spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* malloc/free functions */
void * malloc(unsigned int size, int flags)
{
    int idx;

    ASSERT(_slab_initialized);
    ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

    if (size < (1 << SLAB_MIN_MALLOC_W))
        size = (1 << SLAB_MIN_MALLOC_W);

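    /*
     * Illustrative example (assuming SLAB_MIN_MALLOC_W is 4, i.e. the
     * smallest cache is malloc-16): a request for 100 bytes gives
     * fnzb(99) == 6, so idx == 3 and the object comes from malloc-128.
     */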
    idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

    return slab_alloc(malloc_caches[idx], flags);
}

void free(void *obj)
{
    slab_t *slab;

    if (!obj) return;

    slab = obj2slab(obj);
    _slab_free(slab->cache, obj, slab);
}

/** @}
 */