/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file	slab.c
 * @brief	Slab allocator.
 *
 * The slab allocator is closely modelled after the OpenSolaris slab allocator.
 * @see http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * @li empty slabs are deallocated immediately
 *     (in Linux they are kept in a linked list, in Solaris ???)
 * @li empty magazines are deallocated when not needed
 *     (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * @li cache coloring
 * @li dynamic magazine growing (different magazine sizes are already
 *     supported, but we would need to adjust the allocation strategy)
 *
 * The slab allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first checked whether it is
 * available in the CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound
 * magazine. If there is no such magazine, a new one is allocated (if the
 * allocation fails, the object is deallocated into the slab). If the
 * magazine is full, it is put onto the CPU-shared list of magazines and
 * a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines, to avoid
 * thrashing when somebody is allocating/deallocating 1 item at the magazine
 * size boundary. LIFO order is enforced, which should avoid fragmentation
 * as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked so that it does not use magazines. This is used
 * only for slab-related caches, to avoid deadlocks and infinite recursion
 * (the slab allocator uses itself for allocating all its control structures).
 *
 * The slab allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * TODO:@n
 * For better CPU scaling, the magazine allocation strategy should be
 * extended. Currently, if a cache does not have a magazine, it asks the
 * non-CPU-cached magazine cache to provide one. It might be feasible to
 * add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention on the single
 * per-system magazine cache.
 *
 * @li it might be good to add lock granularity even at the slab level;
 *     we could then try_spinlock over all partial slabs and thus improve
 *     scalability even on the slab level
 */
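
/*
 * A minimal usage sketch of the public interface defined below
 * (illustrative only; foo_t, foo_cache and foo_init are hypothetical
 * client names, not part of this file):
 *
 *	typedef struct { int value; } foo_t;
 *	static slab_cache_t *foo_cache;
 *
 *	void foo_init(void)
 *	{
 *		foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
 *					      NULL, NULL, 0);
 *	}
 *
 *	foo_t *f = slab_alloc(foo_cache, 0);
 *	...
 *	slab_free(foo_cache, f);
 *
 * Variable-size buffers can instead go through malloc()/free() at the
 * end of this file, which round the request up to the nearest malloc-*
 * cache.
 */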

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not allocate it statically
 * - using the slab for internal slab structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] =  {
	"malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K","malloc-256K"
};
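
/*
 * Note: the table above must have exactly SLAB_MAX_MALLOC_W -
 * SLAB_MIN_MALLOC_W + 1 entries. With "malloc-16" as the smallest cache,
 * SLAB_MIN_MALLOC_W is presumably 4 (2^4 = 16), and with "malloc-256K"
 * as the largest, SLAB_MAX_MALLOC_W would be 18 - giving the 15 names
 * listed.
 */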

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< Link in list of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< The index of the next available item */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* Slab allocation functions */

/**
 * Allocate frames for slab space and initialize them
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	int status;
	pfn_t pfn;
	int zone=0;
	
	pfn = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
	data = (void *) PA2KA(PFN2ADDR(pfn));
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(ADDR2PFN(KA2PA(data)));
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	for (i=0; i < (1 << cache->order); i++)
		frame_set_parent(pfn+i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

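	/*
	 * Build the implicit free list: while an object is free, its
	 * first word stores the index of the next free object, so the
	 * slab needs no separate bookkeeping for unallocated items.
	 */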
	for (i=0; i < cache->objects; i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with slab
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(ADDR2PFN(KA2PA(slab->start)));
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);
	
	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	return (slab_t *)frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}
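
/*
 * Note: obj2slab() is what lets free() at the end of this file recover
 * the owning cache from a bare pointer - each frame backing an object
 * carries a parent pointer to its slab_t, set via frame_set_parent()
 * in slab_space_alloc() above.
 */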

/**************************************/
/* Slab functions */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known by the caller; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	if (cache->destructor)
		freed = cache->destructor(obj);
	
	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to the correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the slab control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating;
		 *   that's why recursion is at most 1 level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;

	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Find a full magazine in the cache, take it off the list
 * and return it
 *
 * @param first If true, return the first magazine on the list, else the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
					    int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);
	
	spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in a magazine and free the memory associated with it
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache, 
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i=0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}
	
	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from the magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);
	
	return obj;
}

/**
 * Ensure that the current magazine has room for another object and
 * return a pointer to it; return NULL if no such magazine is available
 * and a new one cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to the processor.
 * First try the current.
 * If full, try the last.
 * If full, put it onto the magazines list,
 * allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as 
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}
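
/*
 * Illustrative walk-through (SLAB_MAG_SIZE is the per-magazine capacity):
 * with both current and last full, one free() flushes last to the shared
 * list, installs a fresh magazine as current and demotes the old current
 * to last; the freed object then lands in slot 0 of the new current.
 * A subsequent alloc() is served from that same magazine (LIFO), so
 * alternating alloc/free at the capacity boundary does not bounce
 * magazines to and from the shared list.
 */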

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}
	
	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* Slab cache functions */

/** Return the number of objects that fit into one slab of the cache */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects*cache->size;
}
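
/*
 * Worked example (assuming 4 KiB pages and, for illustration, the
 * external-descriptor branch): for a cache with object size 96 and
 * order 0, comp_objects() yields 4096 / 96 = 42 objects per slab and
 * badness() yields 4096 - 42*96 = 64 wasted bytes per slab.
 */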

/**
 * Initialize mag_cache structure in slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
	int i;
	
	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = malloc(sizeof(slab_mag_cache_t)*config.cpu_count, 0);
	for (i=0; i < config.cpu_count; i++) {
		memsetb((__address)&cache->mag_cache[i],
			sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock, 
				    "slab_maglock_cpu");
	}
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   int (*destructor)(void *obj),
		   int flags)
{
	int pages;
	ipl_t ipl;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);
	
	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 int (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;
	
	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to the original magazine count to avoid
	 * an endless loop 
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}
	
	if (flags & SLAB_RECLAIM_ALL) {
		/* Destroy CPU-bound magazines */
		for (i=0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;
			
			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove cache from link, so that we don't need
	 * to disable interrupts later
	 */

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */
	
	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs) ||
	    !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		free(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;
	
	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		result = magazine_obj_get(cache);
	}
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
	    magazine_obj_put(cache, obj)) {

		slab_obj_destroy(cache, obj, slab);

	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add assert that interrupts are disabled, otherwise
	 * memory allocation from interrupts can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;
	
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("slab name\t Osize\t Pages\t Obj/pg\t Slabs\t Cached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t%7zd\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors.
 * Allocate the slab for the cpucache and enable it on all existing
 * caches that are SLAB_CACHE_MAGDEFERRED
 */
void slab_enable_cpucache(void)
{
	link_t *cur;
	slab_cache_t *s;

#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	spinlock_lock(&slab_cache_lock);
	
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		s = list_get_instance(cur, slab_cache_t, link);
		if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
			continue;
		make_magcache(s);
		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* kalloc/kfree functions */
void * malloc(unsigned int size, int flags)
{
	int idx;

	ASSERT(_slab_initialized);
	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));
	
	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}
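
/*
 * Index example (assuming SLAB_MIN_MALLOC_W is 4, matching the leading
 * "malloc-16" cache, and fnzb() returning the index of the most
 * significant set bit): a request for 100 bytes gives fnzb(99) = 6,
 * so idx = 6 - 4 + 1 = 3, which selects the "malloc-128" cache - the
 * smallest power-of-two cache that fits the request.
 */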

void free(void *obj)
{
	slab_t *slab;

	if (!obj)
		return;

	slab = obj2slab(obj);
	_slab_free(slab->cache, obj, slab);
}