Subversion Repositories HelenOS-historic

Rev 759 | Author: palkovsky
/*
 * Copyright (C) 2006 Ondrej Palkovsky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 *   notice, this list of conditions and the following disclaimer.
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in the
 *   documentation and/or other materials provided with the distribution.
 * - The name of the author may not be used to endorse or promote products
 *   derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <synch/spinlock.h>
#include <mm/slab.h>
#include <list.h>
#include <memstr.h>
#include <align.h>
#include <mm/heap.h>
#include <config.h>
#include <print.h>
#include <arch.h>
#include <panic.h>
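
/*
 * Global registry of all slab caches in the system (guarded by
 * slab_cache_lock), and the cache used to allocate the magazine
 * structures themselves.
 */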
SPINLOCK_INITIALIZE(slab_cache_lock);
LIST_INITIALIZE(slab_cache_list);

slab_cache_t mag_cache;

/**************************************/
/* SLAB low level functions */
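
/*
 * Note: in this revision the low-level slab backend below is only a stub;
 * slab_obj_create() and slab_obj_destroy() do not manage any memory yet.
 */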

/**
 * Return object to slab and call a destructor
 *
 * @return Number of freed pages
 */
static count_t slab_obj_destroy(slab_cache_t *cache, void *obj)
{
	return 0;
}


/**
 * Take a new object from the slab or create a new one if needed
 *
 * @return Object address or NULL
 */
static void * slab_obj_create(slab_cache_t *cache, int flags)
{
	return NULL;
}

/**************************************/
/* CPU-Cache slab functions */

/**
 * Free all objects in magazine and free memory associated with magazine
 *
 * Assumes cpu->lock is held.
 *
 * @return Number of freed pages
 */
static count_t magazine_destroy(slab_cache_t *cache, 
				slab_magazine_t *mag)
{
	int i;
	count_t frames = 0;

	for (i = 0; i < mag->busy; i++)
		frames += slab_obj_destroy(cache, mag->objs[i]);

	slab_free(&mag_cache, mag);

	return frames;
}

/**
 * Try to find object in CPU-cache magazines
 *
 * @return Pointer to object or NULL if not available
 */
static void * magazine_obj_get(slab_cache_t *cache)
{
	slab_magazine_t *mag;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = cache->mag_cache[CPU->id].current;
	if (!mag)
		goto out;

	if (!mag->busy) {
		/* If current is empty && last exists && not empty, exchange */
		if (cache->mag_cache[CPU->id].last
		    && cache->mag_cache[CPU->id].last->busy) {
			cache->mag_cache[CPU->id].current = cache->mag_cache[CPU->id].last;
			cache->mag_cache[CPU->id].last = mag;
			mag = cache->mag_cache[CPU->id].current;
			goto gotit;
		}
		/* If still not busy, exchange current with one of the
		 * other full magazines */
		spinlock_lock(&cache->lock);
		if (list_empty(&cache->magazines)) {
			spinlock_unlock(&cache->lock);
			goto out;
		}
		/* Free current magazine and take one from list */
		slab_free(&mag_cache, mag);
		mag = list_get_instance(cache->magazines.next,
					slab_magazine_t,
					link);
		list_remove(&mag->link);
		/* Make the magazine taken from the list the new current one,
		 * so the CPU cache does not keep pointing at the freed magazine */
		cache->mag_cache[CPU->id].current = mag;

		spinlock_unlock(&cache->lock);
	}
gotit:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return mag->objs[--mag->busy];
out:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return NULL;
}

/**
 * Put object into CPU-cache magazine
 *
 * We have two magazines bound to each processor.
 * First try the current one.
 *  If it is full, try the last one.
 *   If that is full too, put it on the magazines list,
 *   allocate a new one and exchange last & current.
 *
 * @return 0 - success, -1 - could not get memory
 */
static int magazine_obj_put(slab_cache_t *cache, void *obj)
{
	slab_magazine_t *mag;

	spinlock_lock(&cache->mag_cache[CPU->id].lock);

	mag = cache->mag_cache[CPU->id].current;
	if (!mag) {
		/* We do not want to sleep just because of caching */
		/* Especially we do not want reclaiming to start, as 
		 * this would deadlock */
		mag = slab_alloc(&mag_cache, SLAB_ATOMIC | SLAB_NO_RECLAIM);
		if (!mag) /* Allocation failed, give up on caching */
			goto errout;

		cache->mag_cache[CPU->id].current = mag;
		mag->size = SLAB_MAG_SIZE;
		mag->busy = 0;
	} else if (mag->busy == mag->size) {
		/* If the last magazine is missing or full, allocate a new one */
		mag = cache->mag_cache[CPU->id].last;
		if (!mag || mag->size == mag->busy) {
			if (mag)
				list_prepend(&cache->magazines, &mag->link);

			mag = slab_alloc(&mag_cache, SLAB_ATOMIC | SLAB_NO_RECLAIM);
			if (!mag)
				goto errout;

			mag->size = SLAB_MAG_SIZE;
			mag->busy = 0;
			cache->mag_cache[CPU->id].last = mag;
		}
		/* Exchange the 2 */
		cache->mag_cache[CPU->id].last = cache->mag_cache[CPU->id].current;
		cache->mag_cache[CPU->id].current = mag;
	}
	mag->objs[mag->busy++] = obj;

	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return 0;
errout:
	spinlock_unlock(&cache->mag_cache[CPU->id].lock);
	return -1;
}

/**************************************/
/* Top level SLAB functions */

/** Initialize allocated memory as a slab cache */
static void
_slab_cache_create(slab_cache_t *cache,
		   char *name,
		   size_t size,
		   size_t align,
		   int (*constructor)(void *obj, int kmflag),
		   void (*destructor)(void *obj),
		   int flags)
{
	int i;

	memsetb((__address)cache, sizeof(*cache), 0);
	cache->name = name;
	cache->align = align;

	cache->size = ALIGN_UP(size, align);

	cache->constructor = constructor;
	cache->destructor = destructor;
	cache->flags = flags;

	list_initialize(&cache->full_slabs);
	list_initialize(&cache->partial_slabs);
	list_initialize(&cache->magazines);
	spinlock_initialize(&cache->lock, "cachelock");
	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE)) {
		for (i = 0; i < config.cpu_count; i++)
			spinlock_initialize(&cache->mag_cache[i].lock, 
					    "cpucachelock");
	}

	/* Compute slab sizes, object counts in slabs etc. */
	if (cache->size < SLAB_INSIDE_SIZE)
		cache->flags |= SLAB_CACHE_SLINSIDE;

	spinlock_lock(&slab_cache_lock);

	list_append(&cache->link, &slab_cache_list);

	spinlock_unlock(&slab_cache_lock);
}

/** Create slab cache */
slab_cache_t * slab_cache_create(char *name,
				 size_t size,
				 size_t align,
				 int (*constructor)(void *obj, int kmflag),
				 void (*destructor)(void *obj),
				 int flags)
{
	slab_cache_t *cache;

	cache = malloc(sizeof(*cache) + config.cpu_count*sizeof(cache->mag_cache[0]));
	_slab_cache_create(cache, name, size, align, constructor, destructor,
			   flags);
	return cache;
}
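
/*
 * Example usage (an illustrative sketch only, not part of this file;
 * frob_t and frob_cache are hypothetical names):
 *
 *   typedef struct { int a; int b; } frob_t;
 *
 *   slab_cache_t *frob_cache;
 *   frob_t *f;
 *
 *   frob_cache = slab_cache_create("frob_cache", sizeof(frob_t),
 *                                  sizeof(__address), NULL, NULL, 0);
 *   f = slab_alloc(frob_cache, 0);
 *   ... use f ...
 *   slab_free(frob_cache, f);
 *   slab_cache_destroy(frob_cache);
 */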

/**
 * Reclaim space occupied by objects that are already free
 *
 * @param flags If contains SLAB_RECLAIM_ALL, do aggressive freeing
 * @return Number of freed pages
 */
static count_t _slab_reclaim(slab_cache_t *cache, int flags)
{
	int i;
	slab_magazine_t *mag;
	link_t *cur;
	count_t frames = 0;

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		return 0; /* Nothing to do */

	/* First lock all cpu caches, then the complete cache lock */
	for (i = 0; i < config.cpu_count; i++)
		spinlock_lock(&cache->mag_cache[i].lock);
	spinlock_lock(&cache->lock);

	if (flags & SLAB_RECLAIM_ALL) {
		/* Destroy CPU magazines */
		for (i = 0; i < config.cpu_count; i++) {
			mag = cache->mag_cache[i].current;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].current = NULL;

			mag = cache->mag_cache[i].last;
			if (mag)
				frames += magazine_destroy(cache, mag);
			cache->mag_cache[i].last = NULL;
		}
		/* Destroy full magazines */
		cur = cache->magazines.next;
		while (cur != &cache->magazines) {
			mag = list_get_instance(cur, slab_magazine_t, link);

			cur = cur->next;
			list_remove(cur->prev);
			frames += magazine_destroy(cache, mag);
		}
	}

	spinlock_unlock(&cache->lock);
	for (i = 0; i < config.cpu_count; i++)
		spinlock_unlock(&cache->mag_cache[i].lock);

	return frames;
}

/** Check that there are no slabs and remove cache from system */
void slab_cache_destroy(slab_cache_t *cache)
{
	/* Do not lock anything, we assume the software is correct and
	 * does not touch the cache when it decides to destroy it */

	/* Destroy all magazines */
	_slab_reclaim(cache, SLAB_RECLAIM_ALL);

	/* All slabs must be empty */
	if (!list_empty(&cache->full_slabs)
	    || !list_empty(&cache->partial_slabs))
		panic("Destroying cache that is not empty.");

	spinlock_lock(&slab_cache_lock);
	list_remove(&cache->link);
	spinlock_unlock(&slab_cache_lock);

	free(cache);
}

/** Allocate new object from cache - if no flags given, always returns
 *  memory */
void * slab_alloc(slab_cache_t *cache, int flags)
{
	ipl_t ipl;
	void *result = NULL;

	/* Disable interrupts to avoid deadlocks with interrupt handlers */
	ipl = interrupts_disable();

	if (!(cache->flags & SLAB_CACHE_NOMAGAZINE))
		result = magazine_obj_get(cache);

	if (!result)
		result = slab_obj_create(cache, flags);

	interrupts_restore(ipl);

	return result;
}

/** Return object to cache */
void slab_free(slab_cache_t *cache, void *obj)
{
	ipl_t ipl;

	ipl = interrupts_disable();

	if (cache->flags & SLAB_CACHE_NOMAGAZINE)
		slab_obj_destroy(cache, obj);
	else {
		if (magazine_obj_put(cache, obj)) /* If magazine put failed */
			slab_obj_destroy(cache, obj);
	}
	interrupts_restore(ipl);
}

/* Go through all caches and reclaim what is possible */
count_t slab_reclaim(int flags)
{
	slab_cache_t *cache;
	link_t *cur;
	count_t frames = 0;

	spinlock_lock(&slab_cache_lock);

	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		frames += _slab_reclaim(cache, flags);
	}

	spinlock_unlock(&slab_cache_lock);

	return frames;
}

/* Print list of slabs */
void slab_print_list(void)
{
	slab_cache_t *cache;
	link_t *cur;

	spinlock_lock(&slab_cache_lock);
	printf("SLAB name\tObj size\n");
	for (cur = slab_cache_list.next; cur != &slab_cache_list; cur = cur->next) {
		cache = list_get_instance(cur, slab_cache_t, link);
		printf("%s\t%d\n", cache->name, cache->size);
	}
	spinlock_unlock(&slab_cache_lock);
}

void slab_cache_init(void)
{
	/* Initialize magazine cache */
	_slab_cache_create(&mag_cache,
			   "slab_magazine",
			   sizeof(slab_magazine_t) + SLAB_MAG_SIZE*sizeof(void*),
			   sizeof(__address),
			   NULL, NULL,
			   SLAB_CACHE_NOMAGAZINE);

	/* Initialize structures for malloc */
}