/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
LIST_INITIALIZE(slab_cache_list);

slab_cache_t mag_cache;

typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< List of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< The index of next available item */
} slab_t;
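
/*
 * Layout note (summary of slab_space_alloc() below): a slab occupies
 * 2^order contiguous pages. Free objects form an index-linked free list
 * kept inside the objects themselves: 'nextavail' is the index of the
 * first free object and each free object starts with the index of the
 * next one.
 */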

/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize
 *
 * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	zone_t *zone = NULL;
	int status;
	frame_t *frame;

	data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
	if (status != FRAME_OK) {
		return NULL;
	}
	if (! (cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = malloc(sizeof(*slab)); // , flags);
		if (!slab) {
			frame_free((__address)data);
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	/* TODO: some better way of accessing the frame */
	for (i=0; i < (1 << cache->order); i++) {
		frame = ADDR2FRAME(zone, KA2PA((__address)(data+i*PAGE_SIZE)));
		frame->parent = slab;
	}

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;
	slab->cache = cache;

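	/*
	 * Build the intra-slab free list: free slot i stores the index
	 * of the next free slot (i+1), matching nextavail == 0 above.
	 */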
	for (i=0; i < cache->objects; i++)
		*((int *) (slab->start + i*cache->size)) = i+1;

	atomic_inc(&cache->allocated_slabs);
	return slab;
}

/**
 * Deallocate space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free((__address)slab->start);
	if (! (cache->flags & SLAB_CACHE_SLINSIDE))
		free(slab);

	atomic_dec(&cache->allocated_slabs);

	return 1 << cache->order;
}

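/*
 * Note: obj2slab() relies on slab_space_alloc() having set frame->parent
 * to the owning slab_t for every frame that backs the slab.
 */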
/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	frame_t *frame;

	frame = frame_addr2frame((__address)obj);
	return (slab_t *)frame->parent;
}

/**************************************/
/* SLAB functions */

/**
 * Return object to slab and call a destructor
 *
 * Assumes cache->lock is held.
 *
 * @param slab The slab of the object, if known by the caller; otherwise NULL
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	count_t frames = 0;

	if (!slab)
		slab = obj2slab(obj);

	ASSERT(slab->cache == cache);

	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&slab->link, &cache->partial_slabs);
	}
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		/* Avoid deadlock */
		spinlock_unlock(&cache->lock);
		frames = slab_space_free(cache, slab);
		spinlock_lock(&cache->lock);
	}

	return frames;
}

/**
 * Take new object from slab or create new if needed
 *
 * Assumes cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1-level deep
		 */
		spinlock_unlock(&cache->lock);
		slab = slab_space_alloc(cache, flags);
		spinlock_lock(&cache->lock);
		if (!slab) {
			return NULL;
		}
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;
	if (! slab->available)
		list_prepend(&slab->link, &cache->full_slabs);
	else
		list_prepend(&slab->link, &cache->partial_slabs);
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * Assumes mag_cache[cpu].lock is locked
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i=0; i < mag->busy; i++) {
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);
		atomic_dec(&cache->cached_objs);
	}

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;
	void *obj;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = cache->mag_cache[CPU->id].current;
	if (!mag)
		goto out;

	if (!mag->busy) {
		/* If current is empty && last exists && not empty, exchange */
		if (cache->mag_cache[CPU->id].last \
		    && cache->mag_cache[CPU->id].last->busy) {
			cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
			cache->mag_cache[CPU->id].last = mag;
			mag = cache->mag_cache[CPU->id].current;
			goto gotit;
		}
		/* If still not busy, exchange current with some from
		 * other full magazines */
		spinlock_lock(&cache->lock);
		if (list_empty(&cache->magazines)) {
			spinlock_unlock(&cache->lock);
			goto out;
		}
		/* Free current magazine and take one from list */
		slab_free(&mag_cache, mag);

		mag = list_get_instance(cache->magazines.next,
					slab_magazine_t,
					link);
		list_remove(&mag->link);

		spinlock_unlock(&cache->lock);
	}
gotit:
	obj = mag->objs[--mag->busy];
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_dec(&cache->cached_objs);

	return obj;
out:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return NULL;
}

/**
 * Ensure that the current magazine has room for at least one more object;
 * return a pointer to such a magazine, or NULL if none is available and a
 * new one cannot be allocated.
 *
 * We have 2 magazines bound to each processor.
 * First try the current; if it is full, try the last.
 * If both are full, flush the last to the magazines list,
 * allocate a new one and exchange last & current.
 */
static slab_magazine_t * make_empty_current_mag(slab_cache_t *cache)
{
	slab_magazine_t *cmag, *lastmag, *newmag;

	cmag = cache->mag_cache[CPU->id].current;
	lastmag = cache->mag_cache[CPU->id].last;

	if (cmag) {
		if (cmag->busy < cmag->size)
			return cmag;
		if (lastmag && lastmag->busy < lastmag->size) {
			cache->mag_cache[CPU->id].last = cmag;
			cache->mag_cache[CPU->id].current = lastmag;
			return lastmag;
		}
	}
	/* current | last are full | nonexistent, allocate new */
	/* We do not want to sleep just because of caching */
	/* Especially we do not want reclaiming to start, as
	 * this would deadlock */
	newmag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
	if (!newmag)
		return NULL;
	newmag->size = SLAB_MAG_SIZE;
	newmag->busy = 0;

	/* Flush last to magazine list */
	if (lastmag)
		list_prepend(&lastmag->link, &cache->magazines);
	/* Move current as last, save new as current */
	cache->mag_cache[CPU->id].last = cmag;
	cache->mag_cache[CPU->id].current = newmag;

	return newmag;
}

/**
 * Put object into CPU-cache magazine
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = make_empty_current_mag(cache);
	if (!mag)
		goto errout;

	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	atomic_inc(&cache->cached_objs);
	return 0;
errout:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return -1;
}


/**************************************/
/* SLAB CACHE functions */

/** Return number of objects that fit in certain cache size */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects*cache->size;
}

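/*
 * Worked example (illustrative only; assumes 4kB pages and slab_t kept
 * outside the slab): with cache->size == 512 and cache->order == 0,
 * comp_objects() yields 4096/512 = 8 objects and badness() is
 * 4096 - 8*512 = 0 wasted bytes.
 */
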
/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   void (*destructor)(void *obj),
		   int flags)
{
	int i;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align < sizeof(__native))
		align = sizeof(__native);
	size = ALIGN_UP(size, align);

	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->lock, "cachelock");
	if (! (cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i=0; i < config.cpu_count; i++)
			spinlock_initialize(&cache->mag_cache[i].lock,
					    "cpucachelock");
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	cache->order = (cache->size-1) >> PAGE_WIDTH;

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}
	cache->objects = comp_objects(cache);
	/* If info fits in, put it inside */
	if (badness(cache) > sizeof(slab_t))
		cache->flags |= SLAB_CACHE_SLINSIDE;

	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 void (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0]));
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}

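/*
 * Usage sketch (illustrative only; foo_t, foo_cache and foo are
 * hypothetical names, not part of this file):
 *
 *	slab_cache_t *foo_cache;
 *	foo_t *foo;
 *
 *	foo_cache = slab_cache_create("foo", sizeof(foo_t), 0,
 *				      NULL, NULL, 0);
 *	foo = (foo_t *) slab_alloc(foo_cache, 0);
 *	...
 *	slab_free(foo_cache, foo);
 *	slab_cache_destroy(foo_cache);
 */
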
/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	link_t *cur;
	count_t frames = 0;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* First lock all cpu caches, then the complete cache lock */
	for (i=0; i < config.cpu_count; i++)
		spinlock_lock(&cache->mag_cache[i].lock);
	spinlock_lock(&cache->lock);

	if (flags & SLAB_RECLAIM_ALL) {
		/* Aggressive memfree */
		/* Destroy CPU magazines */
		for (i=0; i < config.cpu_count; i++) {
			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;
		}
	}
	/* Destroy full magazines */
	cur = cache->magazines.prev;

	while (cur != &cache->magazines) {
		mag = list_get_instance(cur, slab_magazine_t, link);

		cur = cur->prev;
		list_remove(&mag->link);
		frames += magazine_destroy(cache, mag);
		/* If we do not do full reclaim, break
		 * as soon as something is freed */
		if (!(flags & SLAB_RECLAIM_ALL) && frames)
			break;
	}

	spinlock_unlock(&cache->lock);
	for (i=0; i < config.cpu_count; i++)
		spinlock_unlock(&cache->mag_cache[i].lock);

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs) \
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	spinlock_lock(&slab_cache_lock);
	list_remove(&cache->link);
	spinlock_unlock(&slab_cache_lock);

	free(cache);
}

/** Allocate new object from cache - if no flags given, always returns
    memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);

	if (!result) {
		spinlock_lock(&cache->lock);
		result = slab_obj_create(cache, flags);
		spinlock_unlock(&cache->lock);
	}

	if (result)
		atomic_inc(&cache->allocated_objs);

	interrupts_restore(ipl);

	return result;
}

/** Return object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE) \
	    || magazine_obj_put(cache, obj)) {

		spinlock_lock(&cache->lock);
		slab_obj_destroy(cache, obj, NULL);
		spinlock_unlock(&cache->lock);
	}
	atomic_dec(&cache->allocated_objs);
	interrupts_restore(ipl);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;

	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tPages\tObj/pg\tSlabs\tCached\tAllocobjs\tCtl\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\t%d\t%d\t%d\t%d\t\t%s\n", cache->name, cache->size,
		       (1 << cache->order), cache->objects,
		       atomic_get(&cache->allocated_slabs),
		       atomic_get(&cache->cached_objs),
		       atomic_get(&cache->allocated_objs),
		       cache->flags & SLAB_CACHE_SLINSIDE ? "In" : "Out");
	}
	spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t)+SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE);

	/* Initialize structures for malloc */
}