/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <mm/frame.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
#include <debug.h>

SPINLOCK_INITIALIZE(slab_cache_lock);
LIST_INITIALIZE(slab_cache_list);

slab_cache_t mag_cache;


typedef struct {
	slab_cache_t *cache;	/**< Pointer to parent cache */
	link_t link;		/**< List of full/partial slabs */
	void *start;		/**< Start address of first available item */
	count_t available;	/**< Count of available items in this slab */
	index_t nextavail;	/**< The index of next available item */
} slab_t;
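
/*
 * Illustration (editorial sketch, not part of the allocator): free objects
 * form an index-linked list embedded in the slab data itself, so no extra
 * bookkeeping memory is needed. For a hypothetical cache with
 * cache->size == 16 and cache->objects == 4, a freshly created slab is:
 *
 *   object index:   0    1    2    3
 *   first word:    [1]  [2]  [3]  [4]
 *   slab->nextavail = 0, slab->available = 4
 *
 * Allocation hands out object nextavail and loads the new nextavail from
 * that object's first word; freeing stores the old nextavail into the
 * object and points nextavail back at it.
 */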

/**************************************/
/* SLAB allocation functions */

/**
 * Allocate frames for slab space and initialize
 *
 * TODO: Change slab_t allocation to slab_alloc(????), malloc with flags!!
 */
static slab_t * slab_space_alloc(slab_cache_t *cache, int flags)
{
	void *data;
	slab_t *slab;
	size_t fsize;
	int i;
	zone_t *zone = NULL;
	int status;

	data = (void *)frame_alloc(FRAME_KA | flags, cache->order, &status, &zone);
	if (status != FRAME_OK)
		return NULL;

	if (!(cache->flags & SLAB_CACHE_SLINSIDE)) {
		slab = malloc(sizeof(*slab)); /* TODO: pass flags through */
		if (!slab) {
			frame_free((__address)data);
			return NULL;
		}
	} else {
		fsize = (PAGE_SIZE << cache->order);
		slab = data + fsize - sizeof(*slab);
	}

	/* Fill in slab structures */
	/* TODO: some better way of accessing the frame */
	for (i = 0; i < (1 << cache->order); i++) {
		ADDR2FRAME(zone, (__address)(data + i*PAGE_SIZE))->parent = slab;
	}

	slab->start = data;
	slab->available = cache->objects;
	slab->nextavail = 0;

	for (i = 0; i < cache->objects; i++)
		*((int *) (slab->start + i*cache->size)) = i + 1;
	return slab;
}

/**
 * Free space associated with SLAB
 *
 * @return number of freed frames
 */
static count_t slab_space_free(slab_cache_t *cache, slab_t *slab)
{
	frame_free((__address)slab->start);
	if (!(cache->flags & SLAB_CACHE_SLINSIDE))
		free(slab);
	return 1 << cache->order;
}

/** Map object to slab structure */
static slab_t * obj2slab(void *obj)
{
	frame_t *frame;

	frame = frame_addr2frame((__address)obj);
	return (slab_t *)frame->parent;
}
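
/*
 * Example of the mapping (illustrative): slab_space_alloc() stored the
 * slab_t pointer in frame->parent for every frame backing the slab, so
 * any object address translates back to its slab in O(1):
 *
 *   void *obj = ...;                // object handed out by slab_obj_create
 *   slab_t *slab = obj2slab(obj);   // frame_addr2frame + parent pointer
 *   ASSERT(obj >= slab->start);
 */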

/**************************************/
/* SLAB functions */


/**
 * Return object to slab, freeing the slab if it becomes empty
 *
 * Assumes cache->lock is held by the caller.
 *
 * @param slab The slab of the object, if known by the caller; NULL otherwise
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj,
				slab_t *slab)
{
	count_t frames = 0;

	if (!slab)
		slab = obj2slab(obj);

	*((int *)obj) = slab->nextavail;
	slab->nextavail = (obj - slab->start)/cache->size;
	slab->available++;

	/* Move it to correct list */
	if (slab->available == 1) {
		/* It was in full, move to partial */
		list_remove(&slab->link);
		list_prepend(&cache->partial_slabs, &slab->link);
	}
	if (slab->available == cache->objects) {
		/* Free associated memory */
		list_remove(&slab->link);
		/* Drop the lock while freeing the frames to avoid deadlock */
		spinlock_unlock(&cache->lock);
		frames = slab_space_free(cache, slab);
		spinlock_lock(&cache->lock);
	}

	return frames;
}

/**
 * Take new object from slab or create new slab if needed
 *
 * Assumes cache->lock is held.
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	slab_t *slab;
	void *obj;

	if (list_empty(&cache->partial_slabs)) {
		/* Allow recursion and reclaiming
		 * - this should work, as the SLAB control structures
		 *   are small and do not need to allocate with anything
		 *   other than frame_alloc when they are allocating,
		 *   that's why we should get recursion at most 1-level deep
		 */
		spinlock_unlock(&cache->lock);
		slab = slab_space_alloc(cache, flags);
		spinlock_lock(&cache->lock);
		if (!slab)
			return NULL;
	} else {
		slab = list_get_instance(cache->partial_slabs.next,
					 slab_t,
					 link);
		list_remove(&slab->link);
	}
	obj = slab->start + slab->nextavail * cache->size;
	slab->nextavail = *((int *)obj);
	slab->available--;
	if (!slab->available)
		list_prepend(&cache->full_slabs, &slab->link);
	else
		list_prepend(&cache->partial_slabs, &slab->link);
	return obj;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * Assumes cache->lock and mag_cache[cpu].lock are held
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache,
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++)
		frames += slab_obj_destroy(cache, mag->objs[i], NULL);

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = cache->mag_cache[CPU->id].current;
	if (!mag)
		goto out;

	if (!mag->busy) {
		/* If current is empty && last exists && not empty, exchange */
		if (cache->mag_cache[CPU->id].last
		    && cache->mag_cache[CPU->id].last->busy) {
			cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
			cache->mag_cache[CPU->id].last = mag;
			mag = cache->mag_cache[CPU->id].current;
			goto gotit;
		}
		/* If still not busy, exchange current with some from
		 * other full magazines */
		spinlock_lock(&cache->lock);
		if (list_empty(&cache->magazines)) {
			spinlock_unlock(&cache->lock);
			goto out;
		}
		/* Free current magazine and take one from list */
		slab_free(&mag_cache, mag);
		mag = list_get_instance(cache->magazines.next,
					slab_magazine_t,
					link);
		list_remove(&mag->link);
		/* Install the full magazine as current, otherwise current
		 * would keep pointing at the magazine just freed */
		cache->mag_cache[CPU->id].current = mag;

		spinlock_unlock(&cache->lock);
	}
gotit:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return mag->objs[--mag->busy];
out:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return NULL;
}

/**
 * Put object into CPU-cache magazine
 *
 * We have 2 magazines bound to each processor.
 * First try the current.
 * If full, try the last.
 * If full, put to magazines list,
 * allocate new, exchange last & current
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = cache->mag_cache[CPU->id].current;
	if (!mag) {
		/* We do not want to sleep just because of caching */
		/* Especially we do not want reclaiming to start, as
		 * this would deadlock */
		mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
		if (!mag) /* Allocation failed, give up on caching */
			goto errout;

		cache->mag_cache[CPU->id].current = mag;
		mag->size = SLAB_MAG_SIZE;
		mag->busy = 0;
	} else if (mag->busy == mag->size) {
		/* Current is full; if the last one is full (or missing) too,
		 * push it to the magazine list and allocate a new one */
		mag = cache->mag_cache[CPU->id].last;
		if (!mag || mag->size == mag->busy) {
			if (mag) {
				/* cache->lock guards the magazines list */
				spinlock_lock(&cache->lock);
				list_prepend(&cache->magazines, &mag->link);
				spinlock_unlock(&cache->lock);
			}

			mag = slab_alloc(&mag_cache, FRAME_ATOMIC | FRAME_NO_RECLAIM);
			if (!mag)
				goto errout;

			mag->size = SLAB_MAG_SIZE;
			mag->busy = 0;
			cache->mag_cache[CPU->id].last = mag;
		}
		/* Exchange the 2 */
		cache->mag_cache[CPU->id].last = cache->mag_cache[CPU->id].current;
		cache->mag_cache[CPU->id].current = mag;
	}
	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return 0;
errout:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return -1;
}
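
/*
 * Sketch of the magazine dance in magazine_obj_put() (illustrative,
 * with SLAB_MAG_SIZE assumed to be 4; '.' marks a free entry):
 *
 *   current [a b c .]  last [. . . .]   put(d) -> fills current
 *   current [a b c d]  last [. . . .]   put(e) -> last not full: swap,
 *                                       e lands in the new current
 *   current [e f g h]  last [a b c d]   put(i) -> last full: last goes to
 *                                       cache->magazines, a fresh magazine
 *                                       is allocated and the two swap again
 *
 * magazine_obj_get() runs the same dance in reverse and steals a full
 * magazine from cache->magazines when both CPU-bound ones are exhausted.
 */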


/**************************************/
/* SLAB CACHE functions */

/** Return number of objects that fit in certain cache size */
static int comp_objects(slab_cache_t *cache)
{
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		return ((PAGE_SIZE << cache->order) - sizeof(slab_t)) / cache->size;
	else
		return (PAGE_SIZE << cache->order) / cache->size;
}

/** Return wasted space in slab */
static int badness(slab_cache_t *cache)
{
	int objects;
	int ssize;

	objects = comp_objects(cache);
	ssize = PAGE_SIZE << cache->order;
	if (cache->flags & SLAB_CACHE_SLINSIDE)
		ssize -= sizeof(slab_t);
	return ssize - objects * cache->size;
}
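
/*
 * Worked example (hypothetical numbers, assuming PAGE_SIZE == 4096 and a
 * cache whose slab_t lives outside the slab): for cache->size == 96 and
 * cache->order == 1, comp_objects() yields 8192 / 96 = 85 objects and
 * badness() reports 8192 - 85*96 = 32 wasted bytes per slab. Growing the
 * order is only worthwhile while the waste exceeds SLAB_MAX_BADNESS(cache).
 */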

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   void (*destructor)(void *obj),
		   int flags)
{
	int i;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;

	if (align)
		size = ALIGN_UP(size, align);
	cache->size = size;

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->lock, "cachelock");
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i = 0; i < config.cpu_count; i++) {
			spinlock_initialize(&cache->mag_cache[i].lock,
					    "cpucachelock");
			/* Make sure current/last start out empty even if the
			 * per-CPU array was not covered by the memset above */
			cache->mag_cache[i].current = NULL;
			cache->mag_cache[i].last = NULL;
		}
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	/* Minimum slab order */
	cache->order = (cache->size / PAGE_SIZE) + 1;

	while (badness(cache) > SLAB_MAX_BADNESS(cache)) {
		cache->order += 1;
	}

	cache->objects = comp_objects(cache);

	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 void (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0]));
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}
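
/*
 * Usage sketch (illustrative only; 'foo_t' and 'foo_cache' are made up):
 *
 *   slab_cache_t *foo_cache;
 *
 *   foo_cache = slab_cache_create("foo_cache", sizeof(foo_t),
 *                                 sizeof(__address), NULL, NULL, 0);
 *   foo_t *foo = slab_alloc(foo_cache, 0);
 *   ...
 *   slab_free(foo_cache, foo);
 *   slab_cache_destroy(foo_cache);
 */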

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If it contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 *
 * TODO: Add light reclaim
 */
446 | { |
445 | { |
447 | int i; |
446 | int i; |
448 | slab_magazine_t *mag; |
447 | slab_magazine_t *mag; |
449 | link_t *cur; |
448 | link_t *cur; |
450 | count_t frames = 0; |
449 | count_t frames = 0; |
451 | 450 | ||
452 | if (cache->flags & SLAB_CACHE_NOMAGAZINE) |
451 | if (cache->flags & SLAB_CACHE_NOMAGAZINE) |
453 | return 0; /* Nothing to do */ |
452 | return 0; /* Nothing to do */ |
454 | 453 | ||
455 | /* First lock all cpu caches, then the complete cache lock */ |
454 | /* First lock all cpu caches, then the complete cache lock */ |
456 | for (i=0; i < config.cpu_count; i++) |
455 | for (i=0; i < config.cpu_count; i++) |
457 | spinlock_lock(&cache->mag_cache[i].lock); |
456 | spinlock_lock(&cache->mag_cache[i].lock); |
458 | spinlock_lock(&cache->lock); |
457 | spinlock_lock(&cache->lock); |
459 | 458 | ||
460 | if (flags & SLAB_RECLAIM_ALL) { |
459 | if (flags & SLAB_RECLAIM_ALL) { |
461 | /* Aggressive memfree */ |
460 | /* Aggressive memfree */ |
462 | 461 | ||
463 | /* Destroy CPU magazines */ |
462 | /* Destroy CPU magazines */ |
464 | for (i=0; i<config.cpu_count; i++) { |
463 | for (i=0; i<config.cpu_count; i++) { |
465 | mag = cache->mag_cache[i].current; |
464 | mag = cache->mag_cache[i].current; |
466 | if (mag) |
465 | if (mag) |
467 | frames += magazine_destroy(cache, mag); |
466 | frames += magazine_destroy(cache, mag); |
468 | cache->mag_cache[i].current = NULL; |
467 | cache->mag_cache[i].current = NULL; |
469 | 468 | ||
470 | mag = cache->mag_cache[i].last; |
469 | mag = cache->mag_cache[i].last; |
471 | if (mag) |
470 | if (mag) |
472 | frames += magazine_destroy(cache, mag); |
471 | frames += magazine_destroy(cache, mag); |
473 | cache->mag_cache[i].last = NULL; |
472 | cache->mag_cache[i].last = NULL; |
474 | } |
473 | } |
475 | } |
474 | } |
476 | /* Destroy full magazines */ |
475 | /* Destroy full magazines */ |
477 | cur=cache->magazines.prev; |
476 | cur=cache->magazines.prev; |
478 | while (cur!=&cache->magazines) { |
477 | while (cur!=&cache->magazines) { |
479 | mag = list_get_instance(cur, slab_magazine_t, link); |
478 | mag = list_get_instance(cur, slab_magazine_t, link); |
480 | 479 | ||
481 | cur = cur->prev; |
480 | cur = cur->prev; |
482 | list_remove(cur->next); |
481 | list_remove(cur->next); |
483 | frames += magazine_destroy(cache,mag); |
482 | frames += magazine_destroy(cache,mag); |
484 | /* If we do not do full reclaim, break |
483 | /* If we do not do full reclaim, break |
485 | * as soon as something is freed */ |
484 | * as soon as something is freed */ |
486 | if (!(flags & SLAB_RECLAIM_ALL) && frames) |
485 | if (!(flags & SLAB_RECLAIM_ALL) && frames) |
487 | break; |
486 | break; |
488 | } |
487 | } |
489 | 488 | ||
490 | spinlock_unlock(&cache->lock); |
489 | spinlock_unlock(&cache->lock); |
491 | for (i=0; i < config.cpu_count; i++) |
490 | for (i=0; i < config.cpu_count; i++) |
492 | spinlock_unlock(&cache->mag_cache[i].lock); |
491 | spinlock_unlock(&cache->mag_cache[i].lock); |
493 | 492 | ||
494 | return frames; |
493 | return frames; |
495 | } |
494 | } |
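
/*
 * Note on lock ordering (derived from the code above): both here and in
 * magazine_obj_get()/magazine_obj_put(), the per-CPU magazine lock is
 * always taken before cache->lock, never the other way round, which is
 * what keeps whole-cache reclaim safe against concurrent per-CPU
 * operations.
 */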

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	spinlock_lock(&slab_cache_lock);
	list_remove(&cache->link);
	spinlock_unlock(&slab_cache_lock);

	free(cache);
}

/** Allocate new object from cache - if no flags given, always returns
 * memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);

	if (!result) {
		spinlock_lock(&cache->lock);
		result = slab_obj_create(cache, flags);
		spinlock_unlock(&cache->lock);
	}

	interrupts_restore(ipl);

	return result;
}

/** Return object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if ((cache->flags & SLAB_CACHE_NOMAGAZINE)
	    || magazine_obj_put(cache, obj)) {

		spinlock_lock(&cache->lock);
		slab_obj_destroy(cache, obj, NULL);
		spinlock_unlock(&cache->lock);
	}
	interrupts_restore(ipl);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}


/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;

	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tOsize\tOrder\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\t%d\n", cache->name, cache->size, cache->order);
	}
	spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
	/* Initialize magazine cache */
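	/* The magazine cache itself is created with SLAB_CACHE_NOMAGAZINE:
	 * magazines are allocated from this very cache, so letting it use
	 * magazines would make magazine allocation recurse into itself. */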
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t) + SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE);

	/* Initialize structures for malloc */
}