/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * The SLAB allocator is closely modelled after the OpenSolaris SLAB allocator
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick_html/
 *
 * with the following exceptions:
 * - empty slabs are deallocated immediately
 *   (in Linux they are kept on a linked list, in Solaris ???)
 * - empty magazines are deallocated when not needed
 *   (in Solaris they are held on a linked list in the slab cache)
 *
 * The following features are not currently supported but would be easy to add:
 * - cache coloring
 * - dynamic magazine growing (different magazine sizes are already
 *   supported, but we would need to adjust the allocation strategy)
 *
 * The SLAB allocator supports per-CPU caches ('magazines') to facilitate
 * good SMP scaling.
 *
 * When a new object is being allocated, it is first checked whether it is
 * available in the CPU-bound magazine. If it is not found there, it is
 * allocated from the CPU-shared slab - if a partially full one is found,
 * it is used, otherwise a new one is allocated.
 *
 * When an object is being deallocated, it is put into the CPU-bound
 * magazine. If there is no such magazine, a new one is allocated (if the
 * allocation fails, the object is deallocated directly into the slab).
 * If the magazine is full, it is put onto the CPU-shared list of magazines
 * and a new one is allocated.
 *
 * The CPU-bound magazine is actually a pair of magazines, to avoid
 * thrashing when somebody is allocating/deallocating one item at the
 * magazine size boundary. LIFO order is enforced, which should avoid
 * fragmentation as much as possible.
 *
 * Every cache contains a list of full slabs and a list of partially full
 * slabs. Empty slabs are freed immediately (thrashing is avoided because
 * of the magazines).
 *
 * The slab information structure is kept inside the data area, if possible.
 * A cache can be marked as not using magazines. This is used only for
 * slab-related caches, to avoid deadlocks and infinite recursion
 * (the SLAB allocator uses itself for allocating all its control structures).
 *
 * The SLAB allocator allocates a lot of space and does not free it. When
 * the frame allocator fails to allocate a frame, it calls slab_reclaim().
 * It tries 'light reclaim' first, then brutal reclaim. The light reclaim
 * releases slabs from the CPU-shared magazine list until at least one slab
 * is deallocated in each cache (this algorithm should probably change).
 * The brutal reclaim removes all cached objects, even from the CPU-bound
 * magazines.
 *
 * TODO: For better CPU scaling, the magazine allocation strategy should
 * be extended. Currently, if a cache does not have a magazine, it asks
 * the non-CPU-cached magazine cache to provide one. It might be feasible
 * to add a CPU-cached magazine cache (which would allocate its magazines
 * from the non-CPU-cached magazine cache). This would provide a nice
 * per-CPU buffer. The other possibility is to use the per-cache
 * 'empty-magazine-list', which decreases contention on the single
 * per-system magazine cache.
 *
 * - it might be good to add lock granularity even at the slab level;
 *   we could then try_spinlock over all partial slabs and thus improve
 *   scalability even at the slab level
 */
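
/*
 * Illustrative usage of the public interface described above (a sketch
 * only; foo_t and foo_cache are hypothetical names, not part of this file):
 *
 *	static slab_cache_t *foo_cache;
 *
 *	foo_cache = slab_cache_create("foo", sizeof(foo_t), 0,
 *				      NULL, NULL, 0);
 *	foo_t *foo = slab_alloc(foo_cache, 0);
 *	...
 *	slab_free(foo_cache, foo);
 *	slab_cache_destroy(foo_cache);
 */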


#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>
#include <bitops.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
static LIST_INITIALIZE(slab_cache_list);

/** Magazine cache */
static slab_cache_t mag_cache;
/** Cache for cache descriptors */
static slab_cache_t slab_cache_cache;

/** Cache for external slab descriptors
 * This time we want a per-CPU cache, so do not make it static
 * - using SLAB for internal SLAB structures will not deadlock,
 *   as all slab structures are 'small' - the control structures of
 *   their caches do not require further allocation
 */
static slab_cache_t *slab_extern_cache;
/** Caches for malloc */
static slab_cache_t *malloc_caches[SLAB_MAX_MALLOC_W-SLAB_MIN_MALLOC_W+1];
char *malloc_names[] = {
	"malloc-8","malloc-16","malloc-32","malloc-64","malloc-128",
	"malloc-256","malloc-512","malloc-1K","malloc-2K",
	"malloc-4K","malloc-8K","malloc-16K","malloc-32K",
	"malloc-64K","malloc-128K"
};

/** Slab descriptor */
typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/* List of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< The index of the next available item */
} slab_t;
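
/*
 * Note on the free-list encoding (a summary of the code below, not new
 * behaviour): free objects inside a slab form an implicit singly linked
 * list. The first int of every free object stores the index of the next
 * free object, and slab->nextavail holds the index of the first one.
 * slab_space_alloc() initializes the chain as 0 -> 1 -> ... -> objects-1,
 * slab_obj_create() pops from its head and slab_obj_destroy() pushes onto
 * it, which gives the LIFO order described above.
 */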

/**************************************/
/* SLAB allocation functions          */

/**
 * Allocate frames for slab space and initialize
 *
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	zone_t *zone = NULL;
	int status;
	frame_t *frame;

	data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = slab_alloc(slab_extern_cache, flags);
		if (!slab) {
			frame_free((__address)data);
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	/* TODO: some better way of accessing the frame */
	for (i = 0; i < (1 << cache->order); i++) {
		frame = ADDR2FRAME(zone, KA2PA((__address)(data + i*PAGE_SIZE)));
		frame->parent = slab;
	}

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

	for (i = 0; i < cache->objects; i++)
		*((int *) (slab->start + i*cache->size)) = i + 1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with slab
 *
 * @return Number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free((__address)slab->start);
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		slab_free(slab_extern_cache, slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	frame_t *frame;

	frame = frame_addr2frame((__address)obj);
	return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions                     */


/**
 * Return object to slab and call a destructor
 *
 * @param slab The slab of the object, if known directly by the caller, otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);
	ASSERT(slab->available < cache->objects);

	spinlock_lock(&cache->slablock);

	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start) / cache->size;
	slab->available++;

	/* Move it to the correct list */
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		spinlock_unlock(&cache->slablock);

		return slab_space_free(cache, slab);

	} else if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	spinlock_unlock(&cache->slablock);
	return 0;
}

/**
 * Take a new object from slab or create a new slab if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	spinlock_lock(&cache->slablock);

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1 level deep
		 */
		spinlock_unlock(&cache->slablock);
		slab = slab_space_alloc(cache, flags);
		if (!slab)
			return NULL;
		spinlock_lock(&cache->slablock);
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;
	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);

	spinlock_unlock(&cache->slablock);
	return obj;
}

/**************************************/
/* CPU-Cache slab functions           */

/**
 * Finds a full magazine in cache, takes it from the list
 * and returns it
 *
 * @param first If true, return the first magazine, else the last
 */
static slab_magazine_t * get_mag_from_cache(slab_cache_t *cache,
					    int first)
{
	slab_magazine_t *mag = NULL;
	link_t *cur;

	spinlock_lock(&cache->maglock);
	if (!list_empty(&cache->magazines)) {
		if (first)
			cur = cache->magazines.next;
		else
			cur = cache->magazines.prev;
		mag = list_get_instance(cur, slab_magazine_t, link);
		list_remove(&mag->link);
		atomic_dec(&cache->magazine_counter);
	}
	spinlock_unlock(&cache->maglock);
	return mag;
}

/** Prepend magazine to magazine list in cache */
static void put_mag_to_cache(slab_cache_t *cache, slab_magazine_t *mag)
{
	spinlock_lock(&cache->maglock);

	list_prepend(&mag->link, &cache->magazines);
	atomic_inc(&cache->magazine_counter);

	spinlock_unlock(&cache->maglock);
}

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Find a full magazine, set it as current and return it
 *
 * Assume cpu_magazine lock is held
 */
static slab_magazine_t * get_full_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;
	if (cmag) { /* First try local CPU magazines */
		if (cmag->busy)
			return cmag;

		if (lastmag && lastmag->busy) {
			cache->mag_cache[CPU->id].current = lastmag;
			cache->mag_cache[CPU->id].last = cmag;
			return lastmag;
		}
	}
	/* Local magazines are empty, import one from the magazine list */
	newmag = get_mag_from_cache(cache, 1);
	if (!newmag)
		return NULL;

	if (lastmag)
		magazine_destroy(cache, lastmag);

	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;
	return newmag;
}

/**
 * Try to find an object in the CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	if (!CPU)
		return NULL;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = get_full_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return NULL;
	}
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
}

/**
 * Ensure that the current magazine has free space, return a pointer to it,
 * or NULL if no magazine with space is available and one cannot be allocated
 *
 * Assume mag_cache[CPU->id].lock is held
 *
 * We have 2 magazines bound to the processor.
 * First try the current.
 * If it is full, try the last.
 * If that is full too, put it on the magazines list,
 * allocate a new one and exchange last & current.
 *
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to the magazine list */
	if (lastmag)
		put_mag_to_cache(cache, lastmag);

	/* Move current to last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}
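
/*
 * A small state sketch of the exchange implemented above (summarizing the
 * code, not adding to it):
 *
 *	current has space             -> return current
 *	current full, last has space  -> swap current and last, return it
 *	both full (or missing)        -> flush last to the shared list,
 *	                                 current becomes last, and a newly
 *	                                 allocated empty magazine becomes
 *	                                 current
 */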

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 on success, -1 if memory could not be obtained
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	if (!CPU)
		return -1;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag) {
		spinlock_unlock(&cache->mag_cache[CPU->id].lock);
		return -1;
	}

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
}


/**************************************/
/* SLAB CACHE functions               */

/** Return the number of objects that fit into a slab of the cache's size */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return the wasted space in a slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects * cache->size;
}
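
/*
 * Worked example (with illustrative values only: PAGE_SIZE == 4096 and
 * sizeof(slab_t) == 32): an order-0 SLAB_CACHE_SLINSIDE cache of 24-byte
 * objects has ssize = 4096 - 32 = 4064, holds 4064 / 24 = 169 objects,
 * and wastes 4064 - 169*24 = 8 bytes per slab.
 */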

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   void (*destructor)(void *obj),
		   int flags)
{
	int i;
	int pages;
	ipl_t ipl;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->slablock, "slab_lock");
	spinlock_initialize(&cache->maglock, "slab_maglock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i = 0; i < config.cpu_count; i++) {
			memsetb((__address)&cache->mag_cache[i],
				sizeof(cache->mag_cache[i]), 0);
			spinlock_initialize(&cache->mag_cache[i].lock,
					    "slab_maglock_cpu");
		}
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	pages = ((cache->size - 1) >> PAGE_WIDTH) + 1;
	cache->order = fnzb(pages);

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If the slab info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Add cache to cache list */
	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 void (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = slab_alloc(&slab_cache_cache, 0);
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	count_t frames = 0;
	int magcount;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* We count up to the original magazine count to avoid
	 * an endless loop
	 */
	magcount = atomic_get(&cache->magazine_counter);
	while (magcount-- && (mag = get_mag_from_cache(cache, 0))) {
		frames += magazine_destroy(cache, mag);
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	if (flags & SLAB_RECLAIM_ALL) {
		/* Free cpu-bound magazines */
		/* Destroy CPU magazines */
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_lock(&cache->mag_cache[i].lock);

			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;

			spinlock_unlock(&cache->mag_cache[i].lock);
		}
	}

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	ipl_t ipl;

	/* First remove the cache from the cache list, so that we don't need
	 * to disable interrupts later
	 */

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);

	list_remove(&cache->link);

	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);

	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs) ||
	    !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	slab_free(&slab_cache_cache, cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);
	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	if (result)
		atomic_inc(&cache->allocated_objs);

	return result;
}

/** Return object to cache, use slab if known */
static void _slab_free(slab_cache_t *cache, void *obj, slab_t *slab)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) ||
	    magazine_obj_put(cache, obj)) {

		slab_obj_destroy(cache, obj, slab);

	}
	interrupts_restore(ipl);
	atomic_dec(&cache->allocated_objs);
}

/** Return slab object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	_slab_free(cache, obj, NULL);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	/* TODO: Add an assert that interrupts are disabled, otherwise
	 * memory allocation from interrupts can deadlock.
	 */

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;
	ipl_t ipl;

	ipl = interrupts_disable();
	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
	interrupts_restore(ipl);
}

#ifdef CONFIG_DEBUG
static int _slab_initialized = 0;
#endif

void slab_cache_init(void)
{
	int i, size;

	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t) + SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize slab_cache cache */
	_slab_cache_create(&slab_cache_cache,
			   "slab_cache",
			   sizeof(slab_cache_cache) + config.cpu_count*sizeof(slab_cache_cache.mag_cache[0]),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE | SLAB_CACHE_SLINSIDE);
	/* Initialize external slab cache */
	slab_extern_cache = slab_cache_create("slab_extern",
					      sizeof(slab_t),
					      0, NULL, NULL,
					      SLAB_CACHE_SLINSIDE);

	/* Initialize structures for malloc */
	for (i = 0, size = (1 << SLAB_MIN_MALLOC_W);
	     i < (SLAB_MAX_MALLOC_W - SLAB_MIN_MALLOC_W + 1);
	     i++, size <<= 1) {
		malloc_caches[i] = slab_cache_create(malloc_names[i],
						     size, 0,
						     NULL, NULL, 0);
	}
#ifdef CONFIG_DEBUG
	_slab_initialized = 1;
#endif
}

/**************************************/
/* kalloc/kfree functions             */
void * kalloc(unsigned int size, int flags)
{
	int idx;

	ASSERT(_slab_initialized);
	ASSERT(size && size <= (1 << SLAB_MAX_MALLOC_W));

	if (size < (1 << SLAB_MIN_MALLOC_W))
		size = (1 << SLAB_MIN_MALLOC_W);

	idx = fnzb(size - 1) - SLAB_MIN_MALLOC_W + 1;

	return slab_alloc(malloc_caches[idx], flags);
}
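
/*
 * Worked example (assuming the smallest cache is malloc-8, i.e.
 * SLAB_MIN_MALLOC_W == 3, and that fnzb() returns the index of the most
 * significant set bit): for size == 100, fnzb(99) == 6, so
 * idx == 6 - 3 + 1 == 4 and the request is served from the malloc-128
 * cache, the nearest power of two that fits 100 bytes.
 */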


void kfree(void *obj)
{
	slab_t *slab;

	if (!obj) return;

	slab = obj2slab(obj);
	_slab_free(slab->cache, obj, slab);
}