/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * - empty SLABs are deallocated immediately
 *   (in Linux they are kept in a linked list, in Solaris ???)
 * - empty magazines are deallocated when not needed
 *   (in Solaris they are held in a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to do:
 * - cache coloring
 * - dynamic magazine growing (different magazine sizes are already
 *   supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, we first check whether it is
 * available in the CPU-bound magazine. If it is not found there, it is
 * allocated from a CPU-shared SLAB - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound
 * magazine. If there is no such magazine, a new one is allocated (if the
 * allocation fails, the object is deallocated into the SLAB). If the
 * magazine is full, it is put onto the cpu-shared list of magazines and
 * a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines, which avoids
 * thrashing when somebody is allocating/deallocating one item right at
 * the magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty SLABs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The SLAB information structure is kept inside the data area, if possible.
 * A cache can be marked that it should not use magazines. This is used
 * only for SLAB-related caches to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the cpu-shared magazine list, until at least 1 slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from CPU-bound
 * magazines.
 *
 * TODO: For better CPU scaling the magazine allocation strategy should
 * be extended. Currently, if a cache does not have a magazine, it asks
 * the non-cpu-cached magazine cache to provide one. It might be feasible
 * to add a cpu-cached magazine cache (which would allocate its magazines
 * from the non-cpu-cached magazine cache). This would provide a nice
 * per-cpu buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention for the single
 * per-system magazine cache.
 *
 * - it might be good to add lock granularity even at the slab level;
 *   we could then try_spinlock over all partial slabs and thus improve
 *   scalability even at the slab level
 */
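
/*
 * Example usage of the public API (an illustrative sketch only; foo_t is
 * a hypothetical payload type, and error handling is omitted):
 *
 *	slab_cache_t *foo_cache;
 *
 *	foo_cache = slab_cache_create("foo_cache", sizeof(foo_t), 0,
 *				      NULL, NULL, 0);
 *	foo_t *foo = slab_alloc(foo_cache, 0);
 *	... use foo ...
 *	slab_free(foo_cache, foo);
 *	slab_cache_destroy(foo_cache);
 */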


#include <synch/spinlock.h>
#include <mm/slab.h>
#include <adt/list.h>
#include <memstr.h>
#include <align.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;
/** Cache for external slab descriptors
 * This time we want a per-cpu cache, so do not make it static
 * - using the SLAB allocator for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] = {
	"malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< List of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< The index of next available item */
} slab_t;

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize the slab descriptor
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	int status;
	pfn_t pfn;
	int zone=0;

	pfn = frame_alloc_rc_zone(cache->order, FRAME_KA | flags, &status, &zone);
	data = (void *) PA2KA(PFN2ADDR(pfn));
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free(ADDR2PFN(KA2PA(data)));
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	for (i=0; i < (1 << cache->order); i++)
		frame_set_parent(pfn+i, slab, zone);

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

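	/* Build the free-object list: each free object stores the index
	 * of the next free object in its first machine word. */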
	for (i=0; i<cache->objects;i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free(ADDR2PFN(KA2PA(slab->start)));
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
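	/* The parent slab was recorded in each frame's parent field by
	 * slab_space_alloc(), so any object address maps back to its
	 * slab via its physical frame number. */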
	return (slab_t *)frame_get_parent(ADDR2PFN(KA2PA(obj)), 0);
}

/**************************************/
/* SLAB functions */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if the caller knows it directly; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	int freed = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	if (cache->destructor)
		freed = cache->destructor(obj);

	spinlock_lock(&cache->slablock);
	ASSERT(slab->available < cache->objects);

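	/* Push the object back onto the slab's free list: store the old
	 * list head index in the object, then make this object the head. */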
	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to the correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return freed + slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return freed;
}

/**
 * Take a new object from a slab, or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   so recursion is at most one level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;

	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);

	if (cache->constructor && cache->constructor(obj, flags)) {
		/* Bad, bad, construction failed */
		slab_obj_destroy(cache, obj, slab);
		return NULL;
	}
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Finds a full magazine in the cache, takes it from the list
 * and returns it
 *
 * @param first If true, return first, else last mag
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
					    int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i=0;i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

377 | 371 | ||
378 | /** |
372 | /** |
379 | * Find full magazine, set it as current and return it |
373 | * Find full magazine, set it as current and return it |
380 | * |
374 | * |
381 | * Assume cpu_magazine lock is held |
375 | * Assume cpu_magazine lock is held |
382 | */ |
376 | */ |
383 | static slab_magazine_t * get_full_current_mag(slab_cache_t *cache) |
377 | static slab_magazine_t * get_full_current_mag(slab_cache_t *cache) |
384 | { |
378 | { |
385 | slab_magazine_t *cmag, *lastmag, *newmag; |
379 | slab_magazine_t *cmag, *lastmag, *newmag; |
386 | 380 | ||
387 | cmag = cache->mag_cache[CPU->id].current; |
381 | cmag = cache->mag_cache[CPU->id].current; |
388 | lastmag = cache->mag_cache[CPU->id].last; |
382 | lastmag = cache->mag_cache[CPU->id].last; |
389 | if (cmag) { /* First try local CPU magazines */ |
383 | if (cmag) { /* First try local CPU magazines */ |
390 | if (cmag->busy) |
384 | if (cmag->busy) |
391 | return cmag; |
385 | return cmag; |
392 | 386 | ||
393 | if (lastmag && lastmag->busy) { |
387 | if (lastmag && lastmag->busy) { |
394 | cache->mag_cache[CPU->id].current = lastmag; |
388 | cache->mag_cache[CPU->id].current = lastmag; |
395 | cache->mag_cache[CPU->id].last = cmag; |
389 | cache->mag_cache[CPU->id].last = cmag; |
396 | return lastmag; |
390 | return lastmag; |
397 | } |
391 | } |
398 | } |
392 | } |
399 | /* Local magazines are empty, import one from magazine list */ |
393 | /* Local magazines are empty, import one from magazine list */ |
400 | newmag = get_mag_from_cache(cache, 1); |
394 | newmag = get_mag_from_cache(cache, 1); |
401 | if (!newmag) |
395 | if (!newmag) |
402 | return NULL; |
396 | return NULL; |
403 | 397 | ||
404 | if (lastmag) |
398 | if (lastmag) |
405 | magazine_destroy(cache, lastmag); |
399 | magazine_destroy(cache, lastmag); |
406 | 400 | ||
407 | cache->mag_cache[CPU->id].last = cmag; |
401 | cache->mag_cache[CPU->id].last = cmag; |
408 | cache->mag_cache[CPU->id].current = newmag; |
402 | cache->mag_cache[CPU->id].current = newmag; |
409 | return newmag; |
403 | return newmag; |
410 | } |
404 | } |
411 | 405 | ||
/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Ensure that the current magazine has free space; return a pointer to it,
 * or NULL if no such magazine is available and a new one cannot be allocated
 *
 * Assumes mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to each processor:
 * First try the current.
 * If full, try the last.
 * If full, put it on the magazines list,
 * allocate a new one, and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag,*lastmag,*newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate a new one */
	/* We do not want to sleep just because of caching;
	 * especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 on success, -1 if memory could not be obtained
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* SLAB CACHE functions */

/** Return number of objects that fit into a slab of the cache's order */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects*cache->size;
}

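/*
 * Worked example (illustrative assumptions: PAGE_SIZE == 4096 and
 * sizeof(slab_t) == 40): for a 96-byte object with SLAB_CACHE_SLINSIDE
 * at order 0, ssize = 4096 - 40 = 4056, objects = 4056 / 96 = 42,
 * and badness = 4056 - 42*96 = 24 bytes wasted per slab.
 */
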
/**
 * Initialize mag_cache structure in slab cache
 */
static void make_magcache(slab_cache_t *cache)
{
	int i;

	ASSERT(_slab_initialized >= 2);

	cache->mag_cache = kalloc(sizeof(slab_mag_cache_t)*config.cpu_count,0);
	for (i=0; i < config.cpu_count; i++) {
		memsetb((__address)&cache->mag_cache[i],
			sizeof(cache->mag_cache[i]), 0);
		spinlock_initialize(&cache->mag_cache[i].lock,
				    "slab_maglock_cpu");
	}
}

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   int (*destructor)(void *obj),
		   int flags)
{
	int pages;
	ipl_t ipl;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE))
		make_magcache(cache);

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size-1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);
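	/* Illustrative arithmetic (assuming PAGE_WIDTH == 12, i.e. 4 KiB
	 * pages): a 5000-byte object yields pages = ((5000-1) >> 12) + 1 = 2,
	 * so order = fnzb(2) = 1, a two-page slab to start from; the
	 * badness check below may still increase the order. */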

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If the info structure fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 int (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to the original magazine count to avoid
	 * an endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag=get_mag_from_cache(cache,0))) {
		frames += magazine_destroy(cache,mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Destroy CPU-bound magazines */
		for (i=0; i<config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove the cache from the list, so that we don't need
	 * to disable interrupts later
	 */

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		kfree(cache->mag_cache);
	slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		result = magazine_obj_get(cache);
	}
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
	    || magazine_obj_put(cache, obj)) {

		slab_obj_destroy(cache, obj, slab);

	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add assert, that interrupts are disabled, otherwise
	 * memory allocation from interrupts can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE | SLAB_CACHE_MAGDEFERRED);

	/* Initialize structures for malloc */
	for (i=0, size=(1<<SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, SLAB_CACHE_MAGDEFERRED);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/** Enable cpu_cache
 *
 * The kernel calls this function when it knows the real number of
 * processors. Allocate the per-cpu magazine structures and enable them
 * on all existing caches that are SLAB_CACHE_MAGDEFERRED.
 */
void slab_enable_cpucache(void)
{
	link_t *cur;
	slab_cache_t *s;

#ifdef CONFIG_DEBUG
	_slab_initialized = 2;
#endif

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		s = list_get_instance(cur, slab_cache_t, link);
		if ((s->flags & SLAB_CACHE_MAGDEFERRED) != SLAB_CACHE_MAGDEFERRED)
			continue;
		make_magcache(s);
		s->flags &= ~SLAB_CACHE_MAGDEFERRED;
	}

	spinlock_unlock(&slab_cache_lock);
}

/**************************************/
/* kalloc/kfree functions */
void * kalloc(unsigned int size, int flags)
{
	int idx;

	ASSERT(_slab_initialized);
	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

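	/* Pick the smallest malloc cache whose objects can hold the request.
	 * Illustrative arithmetic, assuming SLAB_MIN_MALLOC_W == 4:
	 * size = 100 gives fnzb(99) = 6, so idx = 6 - 4 + 1 = 3,
	 * which maps to the "malloc-128" cache. */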
	idx = fnzb(size-1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}


void kfree(void *obj)
{
	slab_t *slab;

	if (!obj) return;

	slab = obj2slab(obj);
	_slab_free(slab->cache, obj, slab);
}